Provide infrastructure for kubernetes upgrades
This commit provides the CLI and REST API infrastructure for kubernetes upgrades. It also includes support for: - starting a kubernetes upgrade - upgrading the kubernetes control plane components - upgrading the kubelets Note that these capabilities cannot be exercised in a standard load - they require a new kubernetes version to be defined in the load and patches created for the new kubernetes version. Change-Id: I0c2755fca89e93583e43fc5ace93bef8fc181766 Story: 2006781 Task: 37306 Task: 37579 Task: 37580 Task: 37581 Depends-On: https://review.opendev.org/#/c/695542 Signed-off-by: Bart Wensley <barton.wensley@windriver.com>
This commit is contained in:
parent
32dc0ce0b4
commit
535879dfd0
|
@ -91,10 +91,10 @@ fixtures = {
|
|||
}
|
||||
|
||||
|
||||
class ihostManagerTest(testtools.TestCase):
|
||||
class HostManagerTest(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ihostManagerTest, self).setUp()
|
||||
super(HostManagerTest, self).setUp()
|
||||
self.api = utils.FakeAPI(fixtures)
|
||||
self.mgr = cgtsclient.v1.ihost.ihostManager(self.api)
|
||||
|
|
@ -0,0 +1,220 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import mock
|
||||
|
||||
from cgtsclient.tests import test_shell
|
||||
from cgtsclient.v1.ihost import ihost
|
||||
from cgtsclient.v1.kube_host_upgrade import KubeHostUpgrade
|
||||
|
||||
FAKE_KUBE_HOST_UPGRADE = {
|
||||
'id': 100,
|
||||
'uuid': '65d3fa7e-1414-4a1d-83b1-42c6bcac48bf',
|
||||
'target_version': 'v1.42.3',
|
||||
'status': 'fake status',
|
||||
'control_plane_version': 'v1.42.2',
|
||||
'kubelet_version': 'v1.42.2',
|
||||
'host_id': 67,
|
||||
}
|
||||
|
||||
FAKE_KUBE_HOST_UPGRADE_2 = {
|
||||
'id': 101,
|
||||
'uuid': '2044b22a-9fb3-4b62-9894-dcc6c0d0f791',
|
||||
'target_version': 'v1.42.3',
|
||||
'status': 'fake status',
|
||||
'control_plane_version': 'v1.42.3',
|
||||
'kubelet_version': 'v1.42.2',
|
||||
'host_id': 68,
|
||||
}
|
||||
|
||||
FAKE_KUBE_HOST_UPGRADE_3 = {
|
||||
'id': 102,
|
||||
'uuid': '0061fd45-0545-48e5-aa1b-fc3c809dfd3c',
|
||||
'target_version': None,
|
||||
'status': None,
|
||||
'control_plane_version': 'N/A',
|
||||
'kubelet_version': 'N/A',
|
||||
'host_id': 69,
|
||||
}
|
||||
|
||||
FAKE_IHOST = {
|
||||
'id': 67,
|
||||
'uuid': '7c4b5408-7097-4ab8-88fe-b8db156b1a8a',
|
||||
'hostname': 'controller-0',
|
||||
'personality': 'controller',
|
||||
}
|
||||
|
||||
FAKE_IHOST_2 = {
|
||||
'id': 68,
|
||||
'uuid': '62adea84-4fd5-4c78-b59a-a8eda2f2861c',
|
||||
'hostname': 'controller-1',
|
||||
'personality': 'controller',
|
||||
}
|
||||
|
||||
FAKE_IHOST_3 = {
|
||||
'id': 69,
|
||||
'uuid': '3a966002-14b9-4b96-bcf5-345ff50086b8',
|
||||
'hostname': 'storage-0',
|
||||
'personality': 'storage',
|
||||
}
|
||||
|
||||
|
||||
class HostTest(test_shell.ShellTest):
    """Shell tests for the kube-host-upgrade-* CLI commands.

    Endpoint discovery, keystone auth, the KubeHostUpgradeManager and the
    ihostManager are all mocked so the shell commands run without a REST
    endpoint.  The mocks read ``self.*_result`` attributes at call time, so
    individual tests can override the returned data after setUp.
    """

    def setUp(self):
        super(HostTest, self).setUp()

        # Mock the client: fake out endpoint discovery and keystone auth.
        p = mock.patch('cgtsclient.client._get_endpoint')
        self.mock_cgtsclient_client_get_endpoint = p.start()
        self.mock_cgtsclient_client_get_endpoint.return_value = \
            'http://fakelocalhost:6385/v1'
        self.addCleanup(p.stop)
        p = mock.patch('cgtsclient.client._get_ksclient')
        self.mock_cgtsclient_client_get_ksclient = p.start()
        self.addCleanup(p.stop)

        # Mock the KubeHostUpgradeManager.  The closure reads the result
        # attribute at call time, so tests may replace it before invoking
        # the shell.
        self.kube_host_upgrade_manager_list_result = [
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE, True)]

        def mock_kube_host_upgrade_manager_list(obj):
            return self.kube_host_upgrade_manager_list_result
        self.mocked_kube_host_upgrade_manager_list = mock.patch(
            'cgtsclient.v1.kube_host_upgrade.KubeHostUpgradeManager.list',
            mock_kube_host_upgrade_manager_list)
        self.mocked_kube_host_upgrade_manager_list.start()
        self.addCleanup(self.mocked_kube_host_upgrade_manager_list.stop)

        # Mock the ihostManager (same deferred-result pattern as above).
        self.ihost_manager_list_result = [ihost(None, FAKE_IHOST, True)]

        def mock_ihost_manager_list(obj):
            return self.ihost_manager_list_result
        self.mocked_ihost_manager_list = mock.patch(
            'cgtsclient.v1.ihost.ihostManager.list',
            mock_ihost_manager_list)
        self.mocked_ihost_manager_list.start()
        self.addCleanup(self.mocked_ihost_manager_list.stop)

        # NOTE(review): this default wraps the host in a list, but the tests
        # that exercise the mock assign a bare ihost object (and the manager
        # API returns a single host) — the list default looks unused or
        # incorrect; confirm against ihostManager.kube_upgrade_control_plane.
        self.ihost_manager_kube_upgrade_control_plane_result = \
            [ihost(None, FAKE_IHOST, True)]

        def mock_ihost_manager_kube_upgrade_control_plane(obj, hostid):
            return self.ihost_manager_kube_upgrade_control_plane_result
        self.mocked_ihost_manager_kube_upgrade_control_plane = mock.patch(
            'cgtsclient.v1.ihost.ihostManager.kube_upgrade_control_plane',
            mock_ihost_manager_kube_upgrade_control_plane)
        self.mocked_ihost_manager_kube_upgrade_control_plane.start()
        self.addCleanup(
            self.mocked_ihost_manager_kube_upgrade_control_plane.stop)

        # NOTE(review): ihost_manager_kube_upgrade_kubelet_result has no
        # default here — any test that triggers this mock must assign the
        # attribute first (test_kube_host_upgrade_kubelet does).
        def mock_ihost_manager_kube_upgrade_kubelet(obj, hostid):
            return self.ihost_manager_kube_upgrade_kubelet_result

        self.mocked_ihost_manager_kube_upgrade_kubelet = mock.patch(
            'cgtsclient.v1.ihost.ihostManager.kube_upgrade_kubelet',
            mock_ihost_manager_kube_upgrade_kubelet)
        self.mocked_ihost_manager_kube_upgrade_kubelet.start()
        self.addCleanup(
            self.mocked_ihost_manager_kube_upgrade_kubelet.stop)

    def test_kube_host_upgrade_list(self):
        """kube-host-upgrade-list shows host and upgrade info for one host."""
        self.make_env()

        # Use --nowrap to prevent failure when test run with small terminal
        results = self.shell("kube-host-upgrade-list --nowrap")
        self.assertIn(str(FAKE_IHOST['id']), results)
        self.assertIn(str(FAKE_IHOST['hostname']), results)
        self.assertIn(str(FAKE_IHOST['personality']), results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE['target_version']), results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE['control_plane_version']),
                      results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE['kubelet_version']), results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE['status']), results)

    def test_kube_host_upgrade_list_multiple(self):
        """kube-host-upgrade-list shows every host known to the system."""
        self.make_env()
        self.kube_host_upgrade_manager_list_result = [
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE, True),
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE_2, True),
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE_3, True),
        ]
        self.ihost_manager_list_result = [
            ihost(None, FAKE_IHOST, True),
            ihost(None, FAKE_IHOST_2, True),
            ihost(None, FAKE_IHOST_3, True),
        ]

        # Use --nowrap to prevent failure when test run with small terminal
        results = self.shell("kube-host-upgrade-list --nowrap")

        for fake_ihost in [FAKE_IHOST, FAKE_IHOST_2, FAKE_IHOST_3]:
            self.assertIn(str(fake_ihost['id']), results)
            self.assertIn(str(fake_ihost['hostname']), results)
            self.assertIn(str(fake_ihost['personality']), results)

        for fake_kube_host_upgrade in [FAKE_KUBE_HOST_UPGRADE,
                                       FAKE_KUBE_HOST_UPGRADE_2,
                                       FAKE_KUBE_HOST_UPGRADE_3]:
            self.assertIn(str(fake_kube_host_upgrade['target_version']),
                          results)
            self.assertIn(str(fake_kube_host_upgrade['control_plane_version']),
                          results)
            self.assertIn(str(fake_kube_host_upgrade['kubelet_version']),
                          results)
            self.assertIn(str(fake_kube_host_upgrade['status']),
                          results)

    def test_kube_host_upgrade_control_plane(self):
        """kube-host-upgrade <host> control-plane shows the upgraded host."""
        self.make_env()
        self.kube_host_upgrade_manager_list_result = [
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE, True),
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE_2, True),
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE_3, True),
        ]
        self.ihost_manager_kube_upgrade_control_plane_result = \
            ihost(None, FAKE_IHOST_2, True)

        results = self.shell("kube-host-upgrade controller-1 control-plane")

        self.assertIn(str(FAKE_IHOST_2['id']), results)
        self.assertIn(str(FAKE_IHOST_2['hostname']), results)
        self.assertIn(str(FAKE_IHOST_2['personality']), results)

        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['target_version']),
                      results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['control_plane_version']),
                      results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['kubelet_version']),
                      results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['status']),
                      results)

    def test_kube_host_upgrade_kubelet(self):
        """kube-host-upgrade <host> kubelet shows the upgraded host."""
        self.make_env()
        self.kube_host_upgrade_manager_list_result = [
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE, True),
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE_2, True),
            KubeHostUpgrade(None, FAKE_KUBE_HOST_UPGRADE_3, True),
        ]
        self.ihost_manager_kube_upgrade_kubelet_result = \
            ihost(None, FAKE_IHOST_2, True)

        results = self.shell("kube-host-upgrade controller-1 kubelet")

        self.assertIn(str(FAKE_IHOST_2['id']), results)
        self.assertIn(str(FAKE_IHOST_2['hostname']), results)
        self.assertIn(str(FAKE_IHOST_2['personality']), results)

        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['target_version']),
                      results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['control_plane_version']),
                      results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['kubelet_version']),
                      results)
        self.assertIn(str(FAKE_KUBE_HOST_UPGRADE_2['status']),
                      results)
|
|
@ -0,0 +1,80 @@
|
|||
#
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
|
||||
|
||||
import testtools
|
||||
|
||||
from cgtsclient.tests import utils
|
||||
import cgtsclient.v1.kube_host_upgrade
|
||||
|
||||
|
||||
KUBE_HOST_UPGRADE = {'id': 1,
|
||||
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
|
||||
'target_version': 'v1.42.3',
|
||||
'status': 'fake status',
|
||||
'control_plane_version': 'v1.42.2',
|
||||
'kubelet_version': 'v1.42.2',
|
||||
'host_id': 1,
|
||||
'created_at': 'fake-created-time',
|
||||
'updated_at': 'fake-updated-time',
|
||||
}
|
||||
|
||||
|
||||
fixtures = {
|
||||
'/v1/kube_host_upgrades':
|
||||
{
|
||||
'GET': (
|
||||
{},
|
||||
{"kube_host_upgrades": [KUBE_HOST_UPGRADE]},
|
||||
),
|
||||
},
|
||||
'/v1/kube_host_upgrades/%s' % KUBE_HOST_UPGRADE['uuid']:
|
||||
{
|
||||
'GET': (
|
||||
{},
|
||||
KUBE_HOST_UPGRADE,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class KubeHostUpgradeManagerTest(testtools.TestCase):
    """Unit tests for KubeHostUpgradeManager, driven by a FakeAPI."""

    def setUp(self):
        super(KubeHostUpgradeManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = cgtsclient.v1.kube_host_upgrade.KubeHostUpgradeManager(
            self.api)

    def test_list(self):
        # list() must GET the collection URL and return the one fixture.
        upgrades = self.mgr.list()
        self.assertEqual(self.api.calls,
                         [('GET', '/v1/kube_host_upgrades', {}, None)])
        self.assertEqual(len(upgrades), 1)

    def test_get(self):
        # get() must GET the item URL and hydrate the fixture resource.
        upgrade = self.mgr.get(KUBE_HOST_UPGRADE['uuid'])
        expected_url = \
            '/v1/kube_host_upgrades/%s' % KUBE_HOST_UPGRADE['uuid']
        self.assertEqual(self.api.calls,
                         [('GET', expected_url, {}, None)])
        self.assertEqual(upgrade.uuid, KUBE_HOST_UPGRADE['uuid'])
|
|
@ -0,0 +1,148 @@
|
|||
#
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
|
||||
|
||||
import testtools
|
||||
|
||||
from cgtsclient.tests import utils
|
||||
import cgtsclient.v1.kube_upgrade
|
||||
import cgtsclient.v1.kube_upgrade_shell
|
||||
|
||||
|
||||
KUBE_UPGRADE = {'from_version': 'v1.42.1',
|
||||
'to_version': 'v1.42.2',
|
||||
'state': 'upgrade-started',
|
||||
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
|
||||
'created_at': 'fake-created-time',
|
||||
'updated_at': 'fake-updated-time',
|
||||
}
|
||||
|
||||
CREATE_KUBE_UPGRADE = {'to_version': 'v1.42.2',
|
||||
'force': False}
|
||||
|
||||
UPDATED_KUBE_UPGRADE = {'from_version': 'v1.42.1',
|
||||
'to_version': 'v1.42.2',
|
||||
'state': 'upgrading-networking',
|
||||
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
|
||||
'created_at': 'fake-created-time',
|
||||
'updated_at': 'fake-updated-time',
|
||||
}
|
||||
|
||||
|
||||
fixtures = {
|
||||
'/v1/kube_upgrade':
|
||||
{
|
||||
'POST': (
|
||||
{},
|
||||
KUBE_UPGRADE,
|
||||
),
|
||||
'GET': (
|
||||
{},
|
||||
{"kube_upgrades": [KUBE_UPGRADE]},
|
||||
),
|
||||
'DELETE': (
|
||||
{},
|
||||
None,
|
||||
),
|
||||
'PATCH': (
|
||||
{},
|
||||
UPDATED_KUBE_UPGRADE,
|
||||
),
|
||||
},
|
||||
'/v1/kube_upgrade/%s' % KUBE_UPGRADE['uuid']:
|
||||
{
|
||||
'GET': (
|
||||
{},
|
||||
KUBE_UPGRADE,
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class KubeUpgradeManagerTest(testtools.TestCase):
    """Unit tests for KubeUpgradeManager, driven by a FakeAPI."""

    # Attributes every returned kube upgrade is compared against.
    _FIELDS = ('from_version', 'to_version', 'state', 'uuid',
               'created_at', 'updated_at')

    def setUp(self):
        super(KubeUpgradeManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = cgtsclient.v1.kube_upgrade.KubeUpgradeManager(self.api)

    def _assert_matches_fixture(self, kube_upgrade):
        # Compare each field of the resource with the KUBE_UPGRADE fixture.
        for field in self._FIELDS:
            self.assertEqual(getattr(kube_upgrade, field),
                             KUBE_UPGRADE[field])

    def test_list(self):
        upgrades = self.mgr.list()
        self.assertEqual(self.api.calls,
                         [('GET', '/v1/kube_upgrade', {}, None)])
        self.assertEqual(len(upgrades), 1)

    def test_get(self):
        kube_upgrade = self.mgr.get(KUBE_UPGRADE['uuid'])
        expected_url = '/v1/kube_upgrade/%s' % KUBE_UPGRADE['uuid']
        self.assertEqual(self.api.calls,
                         [('GET', expected_url, {}, None)])
        self._assert_matches_fixture(kube_upgrade)

    def test_create(self):
        kube_upgrade = self.mgr.create(**CREATE_KUBE_UPGRADE)
        self.assertEqual(self.api.calls,
                         [('POST', '/v1/kube_upgrade', {},
                           CREATE_KUBE_UPGRADE)])
        self._assert_matches_fixture(kube_upgrade)

    def test_delete(self):
        self.mgr.delete()
        self.assertEqual(self.api.calls,
                         [('DELETE', '/v1/kube_upgrade', {}, None)])

    def test_update(self):
        patch = {'op': 'replace',
                 'value': 'upgrading-networking',
                 'path': '/state'}
        kube_upgrade = self.mgr.update(patch=patch)
        self.assertEqual(self.api.calls,
                         [('PATCH', '/v1/kube_upgrade', {}, patch)])
        # The PATCH fixture returns the upgrade with its state advanced.
        self.assertEqual(kube_upgrade.state,
                         cgtsclient.v1.kube_upgrade_shell.
                         KUBE_UPGRADE_STATE_UPGRADING_NETWORKING)
|
|
@ -0,0 +1,133 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import mock
|
||||
|
||||
from cgtsclient.tests import test_shell
|
||||
from cgtsclient.v1.kube_upgrade import KubeUpgrade
|
||||
|
||||
|
||||
class KubeUpgradeTest(test_shell.ShellTest):
    """Shell tests for the kube-upgrade-* CLI commands.

    Defect fixed: the fake kube upgrade dict and the per-field output
    assertions were duplicated verbatim across every test, and the
    setUp/tearDown overrides only called super().  The fake is now built by
    one helper and the output check by another; the no-op overrides are
    removed (the inherited implementations behave identically).
    """

    @staticmethod
    def _fake_kube_upgrade(state='upgrade-started'):
        """Return the fake kube upgrade dict used by all tests.

        :param state: upgrade state to embed in the fake
        """
        return {'from_version': 'v1.42.1',
                'to_version': 'v1.42.2',
                'state': state,
                'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
                'created_at': 'fake-created-time',
                'updated_at': 'fake-updated-time',
                }

    def _assert_kube_upgrade_shown(self, fake_kube_upgrade, results):
        """Assert every attribute of the fake upgrade appears in output."""
        for value in fake_kube_upgrade.values():
            self.assertIn(value, results)

    @mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.list')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_upgrade_show(self, mock_get_endpoint, mock_get_client,
                               mock_list):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        fake_kube_upgrade = self._fake_kube_upgrade()
        mock_list.return_value = [KubeUpgrade(None, fake_kube_upgrade, True)]

        self.make_env()
        results = self.shell("kube-upgrade-show")
        self._assert_kube_upgrade_shown(fake_kube_upgrade, results)

    @mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.create')
    @mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.get')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_upgrade_start(self, mock_get_endpoint, mock_get_client,
                                mock_get, mock_create):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        fake_kube_upgrade = self._fake_kube_upgrade()
        mock_create.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
        mock_get.return_value = KubeUpgrade(None, fake_kube_upgrade, True)

        self.make_env()
        results = self.shell("kube-upgrade-start %s" %
                             fake_kube_upgrade['to_version'])
        self._assert_kube_upgrade_shown(fake_kube_upgrade, results)

    @mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.create')
    @mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.get')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_upgrade_start_force(self, mock_get_endpoint, mock_get_client,
                                      mock_get, mock_create):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        fake_kube_upgrade = self._fake_kube_upgrade()
        mock_create.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
        mock_get.return_value = KubeUpgrade(None, fake_kube_upgrade, True)

        self.make_env()
        results = self.shell("kube-upgrade-start %s --force" %
                             fake_kube_upgrade['to_version'])
        self._assert_kube_upgrade_shown(fake_kube_upgrade, results)

    @mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.update')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_upgrade_networking(self, mock_get_endpoint, mock_get_client,
                                     mock_update):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        fake_kube_upgrade = self._fake_kube_upgrade(
            state='upgrading-networking')
        mock_update.return_value = KubeUpgrade(None, fake_kube_upgrade, True)

        self.make_env()
        results = self.shell("kube-upgrade-networking")
        self._assert_kube_upgrade_shown(fake_kube_upgrade, results)

    @mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.delete')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_upgrade_delete(self, mock_get_endpoint, mock_get_client,
                                 mock_delete):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'

        self.make_env()
        results = self.shell("kube-upgrade-delete")
        self.assertIn("Kubernetes upgrade deleted", results)
|
|
@ -53,6 +53,8 @@ from cgtsclient.v1 import istor
|
|||
from cgtsclient.v1 import isystem
|
||||
from cgtsclient.v1 import itrapdest
|
||||
from cgtsclient.v1 import iuser
|
||||
from cgtsclient.v1 import kube_host_upgrade
|
||||
from cgtsclient.v1 import kube_upgrade
|
||||
from cgtsclient.v1 import kube_version
|
||||
from cgtsclient.v1 import label
|
||||
from cgtsclient.v1 import license
|
||||
|
@ -161,3 +163,5 @@ class Client(http.HTTPClient):
|
|||
self.app = app.AppManager(self)
|
||||
self.host_fs = host_fs.HostFsManager(self)
|
||||
self.kube_version = kube_version.KubeVersionManager(self)
|
||||
self.kube_upgrade = kube_upgrade.KubeUpgradeManager(self)
|
||||
self.kube_host_upgrade = kube_host_upgrade.KubeHostUpgradeManager(self)
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
# All Rights Reserved.
|
||||
#
|
||||
|
||||
from collections import OrderedDict
|
||||
import datetime
|
||||
import os
|
||||
|
||||
|
@ -64,6 +65,22 @@ def _print_ihost_show(ihost, columns=None, output_format=None):
|
|||
utils.print_dict_with_format(data, wrap=72, output_format=output_format)
|
||||
|
||||
|
||||
def _get_kube_host_upgrade_details(cc):
|
||||
# Get the list of kubernetes host upgrades
|
||||
kube_host_upgrades = cc.kube_host_upgrade.list()
|
||||
|
||||
# Map the host_id to hostname and personality
|
||||
kube_host_upgrade_details = dict()
|
||||
for kube_host_upgrade in kube_host_upgrades:
|
||||
kube_host_upgrade_details[kube_host_upgrade.host_id] = {
|
||||
'target_version': kube_host_upgrade.target_version,
|
||||
'control_plane_version': kube_host_upgrade.control_plane_version,
|
||||
'kubelet_version': kube_host_upgrade.kubelet_version,
|
||||
'status': kube_host_upgrade.status}
|
||||
|
||||
return kube_host_upgrade_details
|
||||
|
||||
|
||||
@utils.arg('hostnameorid', metavar='<hostname or id>',
|
||||
help="Name or ID of host")
|
||||
@utils.arg('--column',
|
||||
|
@ -110,6 +127,31 @@ def do_host_upgrade_list(cc, args):
|
|||
utils.print_list(ihosts, fields, field_labels, sortby=0)
|
||||
|
||||
|
||||
def do_kube_host_upgrade_list(cc, args):
    """List kubernetes upgrade info for hosts."""

    hosts = cc.ihost.list()
    upgrade_details = _get_kube_host_upgrade_details(cc)

    # Graft the per-host upgrade attributes onto each host object so they
    # can be rendered as ordinary columns by print_list.
    upgrade_fields = ('target_version', 'control_plane_version',
                      'kubelet_version', 'status')
    for host in hosts:
        details = upgrade_details[host.id]
        for field in upgrade_fields:
            setattr(host, field, details[field])

    fields = ['id', 'hostname', 'personality', 'target_version',
              'control_plane_version', 'kubelet_version', 'status']
    field_labels = list(fields)
    utils.print_list(hosts, fields, field_labels, sortby=0)
|
||||
|
||||
|
||||
@utils.arg('-n', '--hostname',
|
||||
metavar='<hostname>',
|
||||
help='Hostname of the host')
|
||||
|
@ -763,3 +805,41 @@ def do_host_upgrade(cc, args):
|
|||
|
||||
ihost = cc.ihost.upgrade(args.hostid, args.force)
|
||||
_print_ihost_show(ihost)
|
||||
|
||||
|
||||
@utils.arg('hostid',
           metavar='<hostname or id>',
           help="Name or ID of host")
@utils.arg('component',
           metavar='<component>',
           choices=['control-plane', 'kubelet'],
           help='kubernetes component to upgrade')
def do_kube_host_upgrade(cc, args):
    """Perform kubernetes upgrade for a host."""

    # Dispatch on the requested component.  argparse already restricts the
    # value via 'choices', so the final branch is purely defensive.
    if args.component == 'control-plane':
        host = cc.ihost.kube_upgrade_control_plane(args.hostid)
    elif args.component == 'kubelet':
        host = cc.ihost.kube_upgrade_kubelet(args.hostid)
    else:
        raise exc.CommandError('Invalid component value: %s' % args.component)

    # Decorate the returned host with its kubernetes upgrade attributes.
    upgrade_details = _get_kube_host_upgrade_details(cc)
    details = upgrade_details[host.id]
    for field in ('target_version', 'control_plane_version',
                  'kubelet_version', 'status'):
        setattr(host, field, details[field])

    fields = ['id', 'hostname', 'personality', 'target_version',
              'control_plane_version', 'kubelet_version', 'status']

    data = {f: getattr(host, f, '') for f in fields}
    ordereddata = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
    utils.print_dict(ordereddata, wrap=72)
|
||||
|
|
|
@ -122,6 +122,16 @@ class ihostManager(base.Manager):
|
|||
result = self._json_get(self._path('bulk_export'))
|
||||
return result
|
||||
|
||||
def kube_upgrade_control_plane(self, hostid):
    """POST a control plane upgrade request for a host.

    :param hostid: host name, id or uuid
    :returns: the updated host resource
    """
    url = self._path(hostid) + "/kube_upgrade_control_plane"
    _resp, body = self.api.json_request('POST', url)
    return self.resource_class(self, body)
|
||||
|
||||
def kube_upgrade_kubelet(self, hostid):
    """POST a kubelet upgrade request for a host.

    :param hostid: host name, id or uuid
    :returns: the updated host resource
    """
    url = self._path(hostid) + "/kube_upgrade_kubelet"
    _resp, body = self.api.json_request('POST', url)
    return self.resource_class(self, body)
|
||||
|
||||
|
||||
def _find_ihost(cc, ihost):
|
||||
if ihost.isdigit() or utils.is_uuid_like(ihost):
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from cgtsclient.common import base
|
||||
|
||||
|
||||
class KubeHostUpgrade(base.Resource):
    """Resource wrapper for a per-host kubernetes upgrade record."""

    def __repr__(self):
        return "<kube_host_upgrade %s>" % self._info
|
||||
|
||||
|
||||
class KubeHostUpgradeManager(base.Manager):
    """Manager for the kubernetes host upgrade REST resources."""

    resource_class = KubeHostUpgrade

    @staticmethod
    def _path(uuid=None):
        # Collection URL, or item URL when a uuid is supplied.
        collection = '/v1/kube_host_upgrades'
        if uuid:
            return '%s/%s' % (collection, uuid)
        return collection

    def list(self):
        """Retrieve the kubernetes host upgrades known to the system."""
        return self._list(self._path(), "kube_host_upgrades")

    def get(self, uuid):
        """Retrieve the details of a given kubernetes host upgrade.

        :param uuid: uuid of kubernetes host upgrade
        :returns: the resource, or None when it does not exist
        """
        matches = self._list(self._path(uuid))
        return matches[0] if matches else None
|
|
@ -0,0 +1,59 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from cgtsclient.common import base
|
||||
|
||||
|
||||
class KubeUpgrade(base.Resource):
    """Resource wrapper for the system-wide kubernetes upgrade record."""

    def __repr__(self):
        return "<kube_upgrade %s>" % self._info
|
||||
|
||||
|
||||
class KubeUpgradeManager(base.Manager):
    """Manager for the (singleton) kubernetes upgrade REST resource."""

    resource_class = KubeUpgrade

    @staticmethod
    def _path(uuid=None):
        # Collection URL, or item URL when a uuid is supplied.
        if uuid:
            return '/v1/kube_upgrade/%s' % uuid
        return '/v1/kube_upgrade'

    def list(self):
        """Retrieve the kubernetes upgrades known to the system."""
        return self._list(self._path(), "kube_upgrades")

    def get(self, uuid):
        """Retrieve the details of a given kubernetes upgrade.

        :param uuid: uuid of upgrade
        :returns: the resource, or None when it does not exist
        """
        matches = self._list(self._path(uuid))
        return matches[0] if matches else None

    def create(self, to_version, force):
        """Create a new kubernetes upgrade.

        :param to_version: target kubernetes version
        :param force: ignore non management-affecting alarms
        """
        body = {'to_version': to_version, 'force': force}
        return self._create(self._path(), body)

    def delete(self):
        """Delete the kubernetes upgrade.

        :returns: the deleted resource when the API returns a body,
                  otherwise None.
        """
        _resp, body = self.api.json_request('DELETE', self._path())
        if body:
            return self.resource_class(self, body)
        return None

    def update(self, patch):
        """Apply a JSON patch to the kubernetes upgrade.

        :param patch: JSON patch document (list of op dicts)
        """
        return self._update(self._path(), patch)
|
|
@ -0,0 +1,75 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
from cgtsclient.common import utils
|
||||
from cgtsclient import exc
|
||||
|
||||
# Kubernetes constants
|
||||
KUBE_UPGRADE_STATE_UPGRADING_NETWORKING = 'upgrading-networking'
|
||||
|
||||
|
||||
def _print_kube_upgrade_show(obj):
    """Print the attributes of a kubernetes upgrade as a tuple list."""
    fields = ('uuid', 'from_version', 'to_version', 'state', 'created_at',
              'updated_at')
    rows = []
    for field in fields:
        # Missing attributes render as an empty cell rather than failing.
        rows.append((field, getattr(obj, field, '')))
    utils.print_tuple_list(rows)
|
||||
|
||||
|
||||
def do_kube_upgrade_show(cc, args):
    """Show kubernetes upgrade details and attributes."""

    # At most one upgrade can exist; an empty list means none in progress.
    upgrades = cc.kube_upgrade.list()
    if not upgrades:
        print('A kubernetes upgrade is not in progress')
        return
    _print_kube_upgrade_show(upgrades[0])
|
||||
|
||||
|
||||
@utils.arg('to_version', metavar='<target kubernetes version>',
           help="target Kubernetes version")
@utils.arg('-f', '--force',
           action='store_true',
           default=False,
           help="Ignore non management-affecting alarms")
def do_kube_upgrade_start(cc, args):
    """Start a kubernetes upgrade. """

    created = cc.kube_upgrade.create(args.to_version, args.force)
    upgrade_uuid = getattr(created, 'uuid', '')
    # Re-fetch the upgrade so the displayed record reflects server state.
    try:
        refreshed = cc.kube_upgrade.get(upgrade_uuid)
    except exc.HTTPNotFound:
        raise exc.CommandError('Created kubernetes upgrade UUID not found: %s'
                               % upgrade_uuid)
    _print_kube_upgrade_show(refreshed)
|
||||
|
||||
|
||||
def do_kube_upgrade_networking(cc, args):
    """Upgrade kubernetes networking."""

    # Single-attribute JSON patch moving the upgrade into the
    # networking-upgrade state.
    patch = [{'op': 'replace',
              'path': '/state',
              'value': KUBE_UPGRADE_STATE_UPGRADING_NETWORKING}]
    try:
        upgrade = cc.kube_upgrade.update(patch)
    except exc.HTTPNotFound:
        raise exc.CommandError('Kubernetes upgrade UUID not found')

    _print_kube_upgrade_show(upgrade)
|
||||
|
||||
|
||||
def do_kube_upgrade_delete(cc, args):
    """Delete a kubernetes upgrade."""

    try:
        cc.kube_upgrade.delete()
    except exc.HTTPNotFound:
        raise exc.CommandError('Kubernetes upgrade not found')
    else:
        print("Kubernetes upgrade deleted")
|
|
@ -41,6 +41,7 @@ from cgtsclient.v1 import isystem_shell
|
|||
from cgtsclient.v1 import itrapdest_shell
|
||||
from cgtsclient.v1 import iuser_shell
|
||||
|
||||
from cgtsclient.v1 import kube_upgrade_shell
|
||||
from cgtsclient.v1 import kube_version_shell
|
||||
from cgtsclient.v1 import label_shell
|
||||
from cgtsclient.v1 import license_shell
|
||||
|
@ -121,6 +122,7 @@ COMMAND_MODULES = [
|
|||
app_shell,
|
||||
host_fs_shell,
|
||||
kube_version_shell,
|
||||
kube_upgrade_shell,
|
||||
]
|
||||
|
||||
|
||||
|
|
|
@ -39,6 +39,8 @@ from sysinv.api.controllers.v1 import health
|
|||
from sysinv.api.controllers.v1 import helm_charts
|
||||
from sysinv.api.controllers.v1 import host
|
||||
from sysinv.api.controllers.v1 import kube_app
|
||||
from sysinv.api.controllers.v1 import kube_host_upgrade
|
||||
from sysinv.api.controllers.v1 import kube_upgrade
|
||||
from sysinv.api.controllers.v1 import kube_version
|
||||
from sysinv.api.controllers.v1 import label
|
||||
from sysinv.api.controllers.v1 import interface
|
||||
|
@ -253,6 +255,12 @@ class V1(base.APIBase):
|
|||
kube_versions = [link.Link]
|
||||
"Links to the kube_version resource"
|
||||
|
||||
kube_upgrade = [link.Link]
|
||||
"Links to the kube_upgrade resource"
|
||||
|
||||
kube_host_upgrades = [link.Link]
|
||||
"Links to the kube_host_upgrade resource"
|
||||
|
||||
@classmethod
|
||||
def convert(self):
|
||||
v1 = V1()
|
||||
|
@ -786,6 +794,21 @@ class V1(base.APIBase):
|
|||
'kube_versions', '',
|
||||
bookmark=True)]
|
||||
|
||||
v1.kube_upgrade = [link.Link.make_link('self', pecan.request.host_url,
|
||||
'kube_upgrade', ''),
|
||||
link.Link.make_link('bookmark',
|
||||
pecan.request.host_url,
|
||||
'kube_upgrade', '',
|
||||
bookmark=True)]
|
||||
|
||||
v1.kube_host_upgrades = [link.Link.make_link('self',
|
||||
pecan.request.host_url,
|
||||
'kube_host_upgrades', ''),
|
||||
link.Link.make_link('bookmark',
|
||||
pecan.request.host_url,
|
||||
'kube_host_upgrades', '',
|
||||
bookmark=True)]
|
||||
|
||||
return v1
|
||||
|
||||
|
||||
|
@ -855,6 +878,8 @@ class Controller(rest.RestController):
|
|||
interface_datanetworks = interface_datanetwork.InterfaceDataNetworkController()
|
||||
host_fs = host_fs.HostFsController()
|
||||
kube_versions = kube_version.KubeVersionController()
|
||||
kube_upgrade = kube_upgrade.KubeUpgradeController()
|
||||
kube_host_upgrades = kube_host_upgrade.KubeHostUpgradeController()
|
||||
|
||||
@wsme_pecan.wsexpose(V1)
|
||||
def get(self):
|
||||
|
|
|
@ -89,6 +89,7 @@ from sysinv.api.controllers.v1 import patch_api
|
|||
from sysinv.common import ceph
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.common import utils as cutils
|
||||
from sysinv.openstack.common import uuidutils
|
||||
from sysinv.common.storage_backend_conf import StorageBackendConfig
|
||||
|
@ -1079,7 +1080,9 @@ class HostController(rest.RestController):
|
|||
'upgrade': ['POST'],
|
||||
'downgrade': ['POST'],
|
||||
'install_progress': ['POST'],
|
||||
'wipe_osds': ['GET']
|
||||
'wipe_osds': ['GET'],
|
||||
'kube_upgrade_control_plane': ['POST'],
|
||||
'kube_upgrade_kubelet': ['POST'],
|
||||
}
|
||||
|
||||
def __init__(self, from_isystem=False):
|
||||
|
@ -1087,6 +1090,7 @@ class HostController(rest.RestController):
|
|||
self._mtc_address = constants.LOCALHOST_HOSTNAME
|
||||
self._mtc_port = 2112
|
||||
self._ceph = ceph.CephApiOperator()
|
||||
self._kube_operator = kubernetes.KubeOperator()
|
||||
|
||||
self._api_token = None
|
||||
# self._name = 'api-host'
|
||||
|
@ -6433,6 +6437,186 @@ class HostController(rest.RestController):
|
|||
hostupdate.ihost_val_prenotify_update(val)
|
||||
hostupdate.ihost_val.update(val)
|
||||
|
||||
@cutils.synchronized(LOCK_NAME)
|
||||
@wsme_pecan.wsexpose(Host, six.text_type)
|
||||
def kube_upgrade_control_plane(self, uuid):
|
||||
"""Upgrade the kubernetes control plane on this host"""
|
||||
|
||||
host_obj = objects.host.get_by_uuid(pecan.request.context, uuid)
|
||||
|
||||
# The kubernetes upgrade must have been started
|
||||
try:
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(
|
||||
pecan.request.context)
|
||||
except exception.NotFound:
|
||||
raise wsme.exc.ClientSideError(_(
|
||||
"A kubernetes upgrade is not in progress."))
|
||||
|
||||
# Either controller can be upgraded
|
||||
if host_obj.personality != constants.CONTROLLER:
|
||||
raise wsme.exc.ClientSideError(_(
|
||||
"This host does not have a kubernetes control plane."))
|
||||
|
||||
# Verify the upgrade is in the correct state
|
||||
if kube_upgrade_obj.state not in [kubernetes.KUBE_UPGRADE_STARTED,
|
||||
kubernetes.KUBE_UPGRADED_NETWORKING]:
|
||||
raise wsme.exc.ClientSideError(_(
|
||||
"The kubernetes upgrade must be in the %s or %s state to "
|
||||
"upgrade the control plane." % (
|
||||
kubernetes.KUBE_UPGRADE_STARTED,
|
||||
kubernetes.KUBE_UPGRADED_NETWORKING)))
|
||||
|
||||
# Check the existing control plane version
|
||||
cp_versions = self._kube_operator.kube_get_control_plane_versions()
|
||||
current_cp_version = cp_versions.get(host_obj.hostname)
|
||||
if current_cp_version == kube_upgrade_obj.to_version:
|
||||
raise wsme.exc.ClientSideError(_(
|
||||
"The kubernetes control plane on this host was already "
|
||||
"upgraded."))
|
||||
elif current_cp_version is None:
|
||||
raise wsme.exc.ClientSideError(_(
|
||||
"Unable to determine the version of the kubernetes control "
|
||||
"plane on this host."))
|
||||
|
||||
# The host must be unlocked/available to upgrade the control plane
|
||||
if (host_obj.administrative != constants.ADMIN_UNLOCKED or
|
||||
host_obj.operational != constants.OPERATIONAL_ENABLED):
|
||||
raise wsme.exc.ClientSideError(_(
|
||||
"The host must be unlocked and available to upgrade the "
|
||||
"control plane."))
|
||||
|
||||
# Update the target version for this host
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
pecan.request.context, host_obj.id)
|
||||
kube_host_upgrade_obj.target_version = kube_upgrade_obj.to_version
|
||||
kube_host_upgrade_obj.save()
|
||||
|
||||
if kube_upgrade_obj.state == kubernetes.KUBE_UPGRADE_STARTED:
|
||||
# Tell the conductor to upgrade the control plane
|
||||
pecan.request.rpcapi.kube_upgrade_control_plane(
|
||||
pecan.request.context, host_obj.uuid)
|
||||
|
||||
# Update the upgrade state
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_FIRST_MASTER
|
||||
kube_upgrade_obj.save()
|
||||
else:
|
||||
# Tell the conductor to upgrade the control plane
|
||||
pecan.request.rpcapi.kube_upgrade_control_plane(
|
||||
pecan.request.context, host_obj.uuid)
|
||||
|
||||
# Update the upgrade state
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_SECOND_MASTER
|
||||
kube_upgrade_obj.save()
|
||||
|
||||
LOG.info("Upgrading kubernetes control plane on host %s" %
|
||||
host_obj.hostname)
|
||||
return Host.convert_with_links(host_obj)
|
||||
|
||||
    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(Host, six.text_type)
    def kube_upgrade_kubelet(self, uuid):
        """Upgrade the kubernetes kubelet on this host"""

        host_obj = objects.host.get_by_uuid(pecan.request.context, uuid)

        # The kubernetes upgrade must have been started
        try:
            kube_upgrade_obj = objects.kube_upgrade.get_one(
                pecan.request.context)
        except exception.NotFound:
            raise wsme.exc.ClientSideError(_(
                "A kubernetes upgrade is not in progress."))

        # Verify the host has a kubelet (only controller and worker
        # personalities run one)
        if host_obj.personality not in [constants.CONTROLLER,
                                        constants.WORKER]:
            raise wsme.exc.ClientSideError(_(
                "This host does not have a kubelet."))

        # Verify the upgrade is in the correct state. Simplex systems have a
        # single master, so networking completion is sufficient; duplex
        # systems require both masters' control planes to be done first.
        if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX:
            if kube_upgrade_obj.state != kubernetes.KUBE_UPGRADED_NETWORKING:
                raise wsme.exc.ClientSideError(_(
                    "The kubernetes upgrade must be in the %s state to "
                    "upgrade the kubelet." %
                    kubernetes.KUBE_UPGRADED_NETWORKING))
        elif kube_upgrade_obj.state not in [
                kubernetes.KUBE_UPGRADED_SECOND_MASTER,
                kubernetes.KUBE_UPGRADING_KUBELETS]:
            raise wsme.exc.ClientSideError(_(
                "The kubernetes upgrade must be in the %s or %s state to "
                "upgrade the kubelet." % (
                    kubernetes.KUBE_UPGRADED_SECOND_MASTER,
                    kubernetes.KUBE_UPGRADING_KUBELETS)))

        # The necessary patches must be applied
        # NOTE(review): api_token is always None here — patch_is_applied
        # presumably obtains/accepts an anonymous token; confirm.
        api_token = None
        system = pecan.request.dbapi.isystem_get_one()
        target_version_obj = objects.kube_version.get_by_version(
            kube_upgrade_obj.to_version)
        if target_version_obj.available_patches:
            patches_applied = patch_api.patch_is_applied(
                token=api_token,
                timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,
                region_name=system.region_name,
                patches=target_version_obj.available_patches
            )
            if not patches_applied:
                raise wsme.exc.ClientSideError(_(
                    "The following patches must be applied before upgrading "
                    "the kubelets: %s" % target_version_obj.available_patches))

        # Enforce the ordering of controllers first
        kubelet_versions = self._kube_operator.kube_get_kubelet_versions()
        if host_obj.personality == constants.WORKER:
            if kubelet_versions.get(constants.CONTROLLER_0_HOSTNAME) != \
                    kube_upgrade_obj.to_version or \
                    kubelet_versions.get(constants.CONTROLLER_1_HOSTNAME) != \
                    kube_upgrade_obj.to_version:
                raise wsme.exc.ClientSideError(_(
                    "The kubelets on all controller hosts must be upgraded "
                    "before upgrading kubelets on worker hosts."))

        # Check the existing kubelet version
        current_kubelet_version = kubelet_versions.get(host_obj.hostname)
        if current_kubelet_version == kube_upgrade_obj.to_version:
            raise wsme.exc.ClientSideError(_(
                "The kubelet on this host was already upgraded."))
        elif current_kubelet_version is None:
            raise wsme.exc.ClientSideError(_(
                "Unable to determine the version of the kubelet on this host."))

        # Verify the host is in the correct state. Simplex hosts cannot be
        # locked (there is nowhere to fail over), so they must be
        # unlocked/available; all other hosts must be locked/online.
        if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX:
            if (host_obj.administrative != constants.ADMIN_UNLOCKED or
                    host_obj.availability != constants.AVAILABILITY_AVAILABLE):
                raise wsme.exc.ClientSideError(_(
                    "The host must be unlocked and available to upgrade the "
                    "kubelet."))
        elif (host_obj.administrative != constants.ADMIN_LOCKED or
                host_obj.availability != constants.AVAILABILITY_ONLINE):
            raise wsme.exc.ClientSideError(_(
                "The host must be locked and online to upgrade the kubelet."))

        # Set the target version if this is a worker host (controller hosts
        # already had it set by the control plane upgrade)
        if host_obj.personality == constants.WORKER:
            kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
                pecan.request.context, host_obj.id)
            kube_host_upgrade_obj.target_version = kube_upgrade_obj.to_version
            kube_host_upgrade_obj.save()

        # Tell the conductor to upgrade the kubelet
        pecan.request.rpcapi.kube_upgrade_kubelet(pecan.request.context,
                                                  host_obj.uuid)

        # Update the upgrade state
        kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_KUBELETS
        kube_upgrade_obj.save()

        LOG.info("Upgrading kubernetes kubelet on host %s" %
                 host_obj.hostname)
        return Host.convert_with_links(host_obj)
|
||||
|
||||
|
||||
def _create_node(host, xml_node, personality, is_dynamic_ip):
|
||||
host_node = et.SubElement(xml_node, 'host')
|
||||
|
|
|
@ -0,0 +1,187 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import pecan
|
||||
from pecan import rest
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from sysinv.api.controllers.v1 import base
|
||||
from sysinv.api.controllers.v1 import collection
|
||||
from sysinv.api.controllers.v1 import link
|
||||
from sysinv.api.controllers.v1 import types
|
||||
from sysinv.api.controllers.v1 import utils
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv import objects
|
||||
|
||||
|
||||
class KubeHostUpgrade(base.APIBase):
    """API representation of a Kubernetes Host Upgrade."""

    id = int
    "Unique ID for this entry"

    uuid = types.uuid
    "Unique UUID for this entry"

    target_version = wtypes.text
    "The target version for this host"

    status = wtypes.text
    "The status of the kubernetes upgrade for this host"

    control_plane_version = wtypes.text
    "The control plane version for this host"

    kubelet_version = wtypes.text
    "The kubelet version for this host"

    host_id = int
    "The host this belongs to"

    links = [link.Link]
    "A list containing a self link and associated kubernetes host upgrade links"

    def __init__(self, **kwargs):
        # Copy only the DB-object fields that correspond to declared wsme
        # attributes above.
        # NOTE(review): absent keys default to None here, while the sibling
        # KubeUpgrade class defaults to wtypes.Unset — confirm the
        # asymmetry is intended.
        self.fields = list(objects.kube_host_upgrade.fields.keys())
        for k in self.fields:
            if not hasattr(self, k):
                continue
            setattr(self, k, kwargs.get(k))

    @classmethod
    def convert_with_links(cls, kube_host_upgrade_obj, expand=True):
        """Build a KubeHostUpgrade API object (with links) from a DB object.

        :param kube_host_upgrade_obj: kube_host_upgrade database object
        :param expand: when False, strip all but the summary fields
        """
        kube_host_upgrade = KubeHostUpgrade(**kube_host_upgrade_obj.as_dict())
        if not expand:
            kube_host_upgrade.unset_fields_except([
                'id', 'uuid', 'target_version', 'status',
                'control_plane_version', 'kubelet_version', 'host_id'])

        kube_host_upgrade.links = [
            link.Link.make_link('self', pecan.request.host_url,
                                'kube_host_upgrade', kube_host_upgrade.uuid),
            link.Link.make_link('bookmark', pecan.request.host_url,
                                'kube_host_upgrade', kube_host_upgrade.uuid,
                                bookmark=True)]
        return kube_host_upgrade
|
||||
|
||||
|
||||
class KubeHostUpgradeCollection(collection.Collection):
    """API representation of a collection of kubernetes host upgrades."""

    kube_host_upgrades = [KubeHostUpgrade]
    "A list containing kubernetes host upgrade objects"

    def __init__(self):
        self._type = 'kube_host_upgrades'

    @classmethod
    def convert_with_links(cls, kube_host_upgrade_objs, limit, url=None,
                           expand=False, **kwargs):
        """Build the paginated collection from a list of DB objects."""
        coll = KubeHostUpgradeCollection()
        members = []
        for obj in kube_host_upgrade_objs:
            members.append(KubeHostUpgrade.convert_with_links(obj, expand))
        coll.kube_host_upgrades = members
        coll.next = coll.get_next(limit, url=url, **kwargs)
        return coll
|
||||
|
||||
|
||||
class KubeHostUpgradeController(rest.RestController):
    """REST controller for kubernetes host upgrades."""

    def __init__(self):
        # Operator used to query live per-host kubernetes version info.
        self._kube_operator = kubernetes.KubeOperator()

    @staticmethod
    def _get_host_details():
        """Return a dict mapping host_id to hostname/personality."""
        # Retrieve the list of hosts from the database
        host_objs = pecan.request.dbapi.ihost_get_list()

        # Map the host_id to required fields
        host_details = dict()
        for host_obj in host_objs:
            host_details[host_obj.id] = {
                'hostname': host_obj.hostname,
                'personality': host_obj.personality,
            }

        return host_details

    @staticmethod
    def _set_dynamic_versions(upgrade_obj, host_details, cp_versions,
                              kubelet_versions):
        """Fill in the live control plane/kubelet versions on upgrade_obj.

        Hosts without the relevant component get 'N/A'; hosts whose
        version could not be queried get 'unknown'.
        """
        # Not all hosts support kubernetes
        if host_details[upgrade_obj.host_id]['personality'] == \
                constants.CONTROLLER:
            upgrade_obj.control_plane_version = \
                cp_versions.get(
                    host_details[upgrade_obj.host_id]['hostname'],
                    'unknown')
        else:
            upgrade_obj.control_plane_version = 'N/A'

        if host_details[upgrade_obj.host_id]['personality'] in \
                [constants.CONTROLLER, constants.WORKER]:
            upgrade_obj.kubelet_version = \
                kubelet_versions.get(
                    host_details[upgrade_obj.host_id]['hostname'],
                    'unknown')
        else:
            upgrade_obj.kubelet_version = 'N/A'

    @wsme_pecan.wsexpose(KubeHostUpgradeCollection, wtypes.text, int,
                         wtypes.text, wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of kubernetes host upgrades."""

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.kube_host_upgrade.get_by_uuid(
                pecan.request.context,
                marker)

        # Retrieve the host upgrades from the database
        kube_host_upgrades = pecan.request.dbapi.kube_host_upgrade_get_list(
            limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)

        # Get some details about the hosts
        host_details = self._get_host_details()

        # Get the dynamic version information
        cp_versions = self._kube_operator.kube_get_control_plane_versions()
        kubelet_versions = self._kube_operator.kube_get_kubelet_versions()

        for upgrade_obj in kube_host_upgrades:
            self._set_dynamic_versions(upgrade_obj, host_details, cp_versions,
                                       kubelet_versions)

        return KubeHostUpgradeCollection.convert_with_links(
            kube_host_upgrades, limit, sort_key=sort_key, sort_dir=sort_dir)

    @wsme_pecan.wsexpose(KubeHostUpgrade, wtypes.text)
    def get_one(self, kube_host_upgrade_uuid):
        """Retrieve information about the given kube host upgrade."""

        # Retrieve the host upgrade from the database
        kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_uuid(
            pecan.request.context, kube_host_upgrade_uuid)

        # Get some details about the hosts
        host_details = self._get_host_details()

        # Get the dynamic version information
        cp_versions = self._kube_operator.kube_get_control_plane_versions()
        kubelet_versions = self._kube_operator.kube_get_kubelet_versions()

        self._set_dynamic_versions(kube_host_upgrade_obj, host_details,
                                   cp_versions, kubelet_versions)

        return KubeHostUpgrade.convert_with_links(kube_host_upgrade_obj)
|
|
@ -0,0 +1,302 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import pecan
|
||||
from pecan import rest
|
||||
import os
|
||||
import six
|
||||
import wsme
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from sysinv._i18n import _
|
||||
from sysinv.api.controllers.v1 import base
|
||||
from sysinv.api.controllers.v1 import collection
|
||||
from sysinv.api.controllers.v1 import link
|
||||
from sysinv.api.controllers.v1 import patch_api
|
||||
from sysinv.api.controllers.v1 import types
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.common import utils as cutils
|
||||
from sysinv import objects
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class KubeUpgradePatchType(types.JsonPatchType):
    # JSON patch type for kube_upgrade: the only patchable attribute is
    # the upgrade state.

    @staticmethod
    def mandatory_attrs():
        return ['/state']
|
||||
|
||||
|
||||
class KubeUpgrade(base.APIBase):
    """API representation of a Kubernetes Upgrade."""

    id = int
    "Unique ID for this entry"

    uuid = types.uuid
    "Unique UUID for this entry"

    from_version = wtypes.text
    "The from version for the kubernetes upgrade"

    to_version = wtypes.text
    "The to version for the kubernetes upgrade"

    state = wtypes.text
    "Kubernetes upgrade state"

    links = [link.Link]
    "A list containing a self link and associated kubernetes upgrade links"

    def __init__(self, **kwargs):
        # Materialize the field names as a list for consistency with
        # KubeHostUpgrade and to avoid holding a Python 3 dict view tied
        # to the fields dict.
        self.fields = list(objects.kube_upgrade.fields.keys())
        for k in self.fields:
            if not hasattr(self, k):
                continue
            setattr(self, k, kwargs.get(k, wtypes.Unset))

    @classmethod
    def convert_with_links(cls, rpc_kube_upgrade, expand=True):
        """Build a KubeUpgrade API object (with links) from an RPC object.

        :param rpc_kube_upgrade: kube_upgrade RPC/database object
        :param expand: when False, strip all but the summary fields
        """
        kube_upgrade = KubeUpgrade(**rpc_kube_upgrade.as_dict())
        if not expand:
            kube_upgrade.unset_fields_except(['uuid', 'from_version',
                                              'to_version', 'state'])

        kube_upgrade.links = [
            link.Link.make_link('self', pecan.request.host_url,
                                'kube_upgrade', kube_upgrade.uuid),
            link.Link.make_link('bookmark',
                                pecan.request.host_url,
                                'kube_upgrade', kube_upgrade.uuid,
                                bookmark=True)
            ]
        return kube_upgrade
|
||||
|
||||
|
||||
class KubeUpgradeCollection(collection.Collection):
    """API representation of a collection of kubernetes upgrades."""

    kube_upgrades = [KubeUpgrade]
    "A list containing kubernetes upgrade objects"

    def __init__(self, **kwargs):
        self._type = 'kube_upgrades'

    @classmethod
    def convert_with_links(cls, rpc_kube_upgrade, expand=True, **kwargs):
        """Build the collection from a list of upgrade RPC objects."""
        # Local renamed so it does not shadow the imported 'collection'
        # module.
        upgrade_collection = KubeUpgradeCollection()
        upgrade_collection.kube_upgrades = [
            KubeUpgrade.convert_with_links(upgrade, expand)
            for upgrade in rpc_kube_upgrade]
        return upgrade_collection
|
||||
|
||||
|
||||
LOCK_NAME = 'KubeUpgradeController'
|
||||
|
||||
|
||||
class KubeUpgradeController(rest.RestController):
    """REST controller for kubernetes upgrades."""

    def __init__(self):
        # Operator used to query live kubernetes version information.
        self._kube_operator = kubernetes.KubeOperator()

    def _get_updates(self, patch):
        """Retrieve the updated attributes from the patch request.

        :param patch: list of JSON-patch operations
        :returns: dict mapping attribute name (leading '/' stripped) to
                  its new value
        """
        updates = {}
        for p in patch:
            attribute = p['path'] if p['path'][0] != '/' else p['path'][1:]
            updates[attribute] = p['value']
        return updates

    @wsme_pecan.wsexpose(KubeUpgradeCollection)
    def get_all(self):
        """Retrieve a list of kubernetes upgrades."""

        kube_upgrades = pecan.request.dbapi.kube_upgrade_get_list()
        return KubeUpgradeCollection.convert_with_links(kube_upgrades)

    @wsme_pecan.wsexpose(KubeUpgrade, types.uuid)
    def get_one(self, uuid):
        """Retrieve information about the given kubernetes upgrade."""

        rpc_kube_upgrade = objects.kube_upgrade.get_by_uuid(
            pecan.request.context, uuid)
        return KubeUpgrade.convert_with_links(rpc_kube_upgrade)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(KubeUpgrade, wtypes.text, body=six.text_type)
    def post(self, to_version, body):
        """Create a new Kubernetes Upgrade and start upgrade.

        :param to_version: target kubernetes version
        :param body: request body; the optional 'force' key (must be the
                     boolean True) bypasses non-management-affecting
                     health failures
        """

        # 'is True' rejects truthy-but-non-boolean values from the body.
        force = body.get('force', False) is True

        # There must not already be a kubernetes upgrade in progress
        try:
            pecan.request.dbapi.kube_upgrade_get_one()
        except exception.NotFound:
            pass
        else:
            raise wsme.exc.ClientSideError(_(
                "A kubernetes upgrade is already in progress"))

        # The target version must be available
        try:
            target_version_obj = objects.kube_version.get_by_version(
                to_version)
        except exception.KubeVersionNotFound:
            raise wsme.exc.ClientSideError(_(
                "Kubernetes version %s is not available" % to_version))

        # The upgrade path must be supported
        current_kube_version = \
            self._kube_operator.kube_get_kubernetes_version()
        if not target_version_obj.can_upgrade_from(current_kube_version):
            raise wsme.exc.ClientSideError(_(
                "The installed Kubernetes version %s cannot upgrade to "
                "version %s" % (current_kube_version,
                                target_version_obj.version)))

        # The current kubernetes version must be active
        version_states = self._kube_operator.kube_get_version_states()
        if version_states.get(current_kube_version) != \
                kubernetes.KUBE_STATE_ACTIVE:
            raise wsme.exc.ClientSideError(_(
                "The installed Kubernetes version %s is not active on all "
                "hosts" % current_kube_version))

        # The necessary patches must be applied
        # NOTE(review): api_token is always None here — patch_api
        # presumably handles token acquisition; confirm.
        api_token = None
        system = pecan.request.dbapi.isystem_get_one()
        if target_version_obj.applied_patches:
            patches_applied = patch_api.patch_is_applied(
                token=api_token,
                timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,
                region_name=system.region_name,
                patches=target_version_obj.applied_patches
            )
            if not patches_applied:
                raise wsme.exc.ClientSideError(_(
                    "The following patches must be applied before starting "
                    "the kubernetes upgrade: %s" %
                    target_version_obj.applied_patches))

        # The necessary patches must be available
        if target_version_obj.available_patches:
            patches_available = patch_api.patch_is_available(
                token=api_token,
                timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,
                region_name=system.region_name,
                patches=target_version_obj.available_patches
            )
            if not patches_available:
                raise wsme.exc.ClientSideError(_(
                    "The following patches must be available before starting "
                    "the kubernetes upgrade: %s" %
                    target_version_obj.available_patches))

        # TODO: check that all installed applications support new k8s version
        # TODO: check that tiller/armada support new k8s version

        # The system must be healthy from the platform perspective
        success, output = pecan.request.rpcapi.get_system_health(
            pecan.request.context, force=force)
        if not success:
            LOG.info("Health query failure during kubernetes upgrade start: %s"
                     % output)
            if os.path.exists(constants.SYSINV_RUNNING_IN_LAB) and force:
                LOG.info("Running in lab, ignoring health errors.")
            else:
                raise wsme.exc.ClientSideError(_(
                    "System is not in a valid state for kubernetes upgrade. "
                    "Run system health-query-upgrade for more details."))

        # TODO: kubernetes related health checks...

        # Tell the conductor to download the images for the new version
        pecan.request.rpcapi.kube_download_images(
            pecan.request.context, to_version)

        # Create upgrade record.
        create_values = {'from_version': current_kube_version,
                         'to_version': to_version,
                         'state': kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES}
        new_upgrade = pecan.request.dbapi.kube_upgrade_create(create_values)

        # Set the target version for each host to the current version
        update_values = {'target_version': current_kube_version}
        kube_host_upgrades = pecan.request.dbapi.kube_host_upgrade_get_list()
        for kube_host_upgrade in kube_host_upgrades:
            pecan.request.dbapi.kube_host_upgrade_update(kube_host_upgrade.id,
                                                         update_values)

        LOG.info("Starting kubernetes upgrade from version: %s to version: %s"
                 % (current_kube_version, to_version))

        return KubeUpgrade.convert_with_links(new_upgrade)

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate([KubeUpgradePatchType])
    @wsme_pecan.wsexpose(KubeUpgrade, body=[KubeUpgradePatchType])
    def patch(self, patch):
        """Updates attributes of a Kubernetes Upgrade.

        Currently the only supported transition is to the
        upgrading-networking state.
        """

        updates = self._get_updates(patch)

        # Get the current upgrade
        try:
            kube_upgrade_obj = objects.kube_upgrade.get_one(
                pecan.request.context)
        except exception.NotFound:
            raise wsme.exc.ClientSideError(_(
                "A kubernetes upgrade is not in progress"))

        # KubeUpgradePatchType.mandatory_attrs guarantees 'state' exists.
        if updates['state'] == kubernetes.KUBE_UPGRADING_NETWORKING:
            # Make sure upgrade is in the correct state to upgrade networking
            if kube_upgrade_obj.state != \
                    kubernetes.KUBE_UPGRADED_FIRST_MASTER:
                raise wsme.exc.ClientSideError(_(
                    "Kubernetes upgrade must be in %s state to upgrade "
                    "networking" %
                    kubernetes.KUBE_UPGRADED_FIRST_MASTER))

            # Tell the conductor to upgrade networking
            pecan.request.rpcapi.kube_upgrade_networking(
                pecan.request.context, kube_upgrade_obj.to_version)

            # Update the upgrade state
            kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_NETWORKING
            kube_upgrade_obj.save()
            return KubeUpgrade.convert_with_links(kube_upgrade_obj)
        else:
            raise wsme.exc.ClientSideError(_(
                "Invalid state %s supplied" % updates['state']))

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(KubeUpgrade)
    def delete(self):
        """Delete Kubernetes Upgrade."""

        # An upgrade must be in progress
        try:
            kube_upgrade_obj = pecan.request.dbapi.kube_upgrade_get_one()
        except exception.NotFound:
            raise wsme.exc.ClientSideError(_(
                "A kubernetes upgrade is not in progress"))

        # The upgrade must be complete
        if kube_upgrade_obj.state != \
                kubernetes.KUBE_UPGRADE_COMPLETE:
            raise wsme.exc.ClientSideError(_(
                "Kubernetes upgrade must be in %s state to delete" %
                kubernetes.KUBE_UPGRADE_COMPLETE))

        # Delete the upgrade
        pecan.request.dbapi.kube_upgrade_destroy(kube_upgrade_obj.id)
|
|
@ -4,12 +4,14 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
import pecan
|
||||
from pecan import rest
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from sysinv.api.controllers.v1 import base
|
||||
from sysinv.api.controllers.v1 import collection
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv import objects
|
||||
|
||||
|
@ -81,29 +83,45 @@ class KubeVersionController(rest.RestController):
|
|||
self._parent = parent
|
||||
self._kube_operator = kubernetes.KubeOperator()
|
||||
|
||||
@staticmethod
|
||||
def _update_target(version_obj, upgrade_to_version):
|
||||
"""Determines whether this is the target version"""
|
||||
|
||||
if upgrade_to_version is not None:
|
||||
if upgrade_to_version == version_obj.version:
|
||||
# We are in an upgrade and this is the to_version
|
||||
version_obj.target = True
|
||||
else:
|
||||
# We are in an upgrade and this is not the to_version
|
||||
version_obj.target = False
|
||||
elif version_obj.state == kubernetes.KUBE_STATE_ACTIVE:
|
||||
# We are not in an upgrade and this is the active version
|
||||
version_obj.target = True
|
||||
else:
|
||||
# This is not the version you are looking for
|
||||
version_obj.target = False
|
||||
|
||||
@wsme_pecan.wsexpose(KubeVersionCollection)
|
||||
def get_all(self):
|
||||
"""Retrieve a list of kubernetes versions."""
|
||||
|
||||
# Get the current upgrade (if one exists)
|
||||
upgrade_to_version = None
|
||||
try:
|
||||
kube_upgrade_obj = pecan.request.dbapi.kube_upgrade_get_one()
|
||||
upgrade_to_version = kube_upgrade_obj.to_version
|
||||
except exception.NotFound:
|
||||
pass
|
||||
|
||||
# Get the dynamic version information
|
||||
version_states = self._kube_operator.kube_get_version_states()
|
||||
|
||||
rpc_kube_versions = []
|
||||
for version in kubernetes.get_kube_versions():
|
||||
version_obj = KubeVersion()
|
||||
version_obj.version = version['version']
|
||||
version_obj.upgrade_from = version['upgrade_from']
|
||||
version_obj.downgrade_to = version['downgrade_to']
|
||||
version_obj.applied_patches = version['applied_patches']
|
||||
version_obj.available_patches = version['available_patches']
|
||||
version_obj = objects.kube_version.get_by_version(
|
||||
version['version'])
|
||||
version_obj.state = version_states[version['version']]
|
||||
# For now, the active version will be marked as the target. When
|
||||
# upgrades are supported, we will also have to consider whether
|
||||
# an upgrade is in progress to determine the target.
|
||||
if version_obj.state == kubernetes.KUBE_STATE_ACTIVE:
|
||||
version_obj.target = True
|
||||
else:
|
||||
version_obj.target = False
|
||||
self._update_target(version_obj, upgrade_to_version)
|
||||
rpc_kube_versions.append(version_obj)
|
||||
|
||||
return KubeVersionCollection.convert_with_links(rpc_kube_versions)
|
||||
|
@ -118,11 +136,15 @@ class KubeVersionController(rest.RestController):
|
|||
# Get the dynamic version information
|
||||
version_states = self._kube_operator.kube_get_version_states()
|
||||
rpc_kube_version.state = version_states[version]
|
||||
# For now, the active version will be marked as the target. When
|
||||
# upgrades are supported, we will also have to consider whether
|
||||
# an upgrade is in progress to determine the target.
|
||||
if rpc_kube_version.state == kubernetes.KUBE_STATE_ACTIVE:
|
||||
rpc_kube_version.target = True
|
||||
else:
|
||||
rpc_kube_version.target = False
|
||||
|
||||
# Get the current upgrade (if one exists)
|
||||
upgrade_to_version = None
|
||||
try:
|
||||
kube_upgrade_obj = pecan.request.dbapi.kube_upgrade_get_one()
|
||||
upgrade_to_version = kube_upgrade_obj.to_version
|
||||
except exception.NotFound:
|
||||
pass
|
||||
|
||||
self._update_target(rpc_kube_version, upgrade_to_version)
|
||||
|
||||
return KubeVersion.convert_with_links(rpc_kube_version)
|
||||
|
|
|
@ -85,6 +85,27 @@ def patch_is_applied(token, timeout, region_name, patches):
|
|||
return response
|
||||
|
||||
|
||||
def patch_is_available(token, timeout, region_name, patches):
|
||||
"""
|
||||
Query the available state for a list of patches
|
||||
"""
|
||||
api_cmd = None
|
||||
|
||||
if not token:
|
||||
token = get_token(region_name)
|
||||
if token:
|
||||
api_cmd = token.get_service_url("patching", "patching")
|
||||
|
||||
patch_dependencies = ""
|
||||
for patch in patches:
|
||||
patch_dependencies += "/%s" % patch
|
||||
|
||||
api_cmd += "/v1/is_available%s" % patch_dependencies
|
||||
|
||||
response = rest_api_request(token, "GET", api_cmd, timeout=timeout)
|
||||
return response
|
||||
|
||||
|
||||
def patch_report_app_dependencies(token, timeout, region_name, patches, app_name):
|
||||
"""
|
||||
Report the application patch dependencies
|
||||
|
|
|
@ -1507,9 +1507,11 @@ ANSIBLE_BOOTSTRAP_FLAG = os.path.join(tsc.VOLATILE_PATH, ".ansible_bootstrap")
|
|||
UNLOCK_READY_FLAG = os.path.join(tsc.PLATFORM_CONF_PATH, ".unlock_ready")
|
||||
INVENTORY_WAIT_TIMEOUT_IN_SECS = 90
|
||||
|
||||
# Ansible kubernetes networking playbook
|
||||
# Ansible playbooks
|
||||
ANSIBLE_KUBE_NETWORKING_PLAYBOOK = \
|
||||
'/usr/share/ansible/stx-ansible/playbooks/upgrade-k8s-networking.yml'
|
||||
ANSIBLE_KUBE_PUSH_IMAGES_PLAYBOOK = \
|
||||
'/usr/share/ansible/stx-ansible/playbooks/push_k8s_images.yml'
|
||||
|
||||
# Clock synchronization types
|
||||
NTP = 'ntp'
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
from __future__ import absolute_import
|
||||
from distutils.version import LooseVersion
|
||||
import json
|
||||
import re
|
||||
|
||||
from kubernetes import config
|
||||
from kubernetes import client
|
||||
|
@ -39,6 +40,25 @@ KUBE_APISERVER = 'kube-apiserver'
|
|||
KUBE_CONTROLLER_MANAGER = 'kube-controller-manager'
|
||||
KUBE_SCHEDULER = 'kube-scheduler'
|
||||
|
||||
# Kubernetes upgrade states
|
||||
KUBE_UPGRADE_DOWNLOADING_IMAGES = 'downloading-images'
|
||||
KUBE_UPGRADE_STARTED = 'upgrade-started'
|
||||
KUBE_UPGRADING_FIRST_MASTER = 'upgrading-first-master'
|
||||
KUBE_UPGRADED_FIRST_MASTER = 'upgraded-first-master'
|
||||
KUBE_UPGRADING_NETWORKING = 'upgrading-networking'
|
||||
KUBE_UPGRADED_NETWORKING = 'upgraded-networking'
|
||||
KUBE_UPGRADING_SECOND_MASTER = 'upgrading-second-master'
|
||||
KUBE_UPGRADED_SECOND_MASTER = 'upgraded-second-master'
|
||||
KUBE_UPGRADING_KUBELETS = 'upgrading-kubelets'
|
||||
KUBE_UPGRADE_COMPLETE = 'upgrade-complete'
|
||||
KUBE_UPGRADE_FAILED = 'upgrade-failed'
|
||||
|
||||
# Kubernetes constants
|
||||
MANIFEST_APPLY_TIMEOUT = 60 * 15
|
||||
MANIFEST_APPLY_INTERVAL = 10
|
||||
POD_START_TIMEOUT = 60
|
||||
POD_START_INTERVAL = 10
|
||||
|
||||
|
||||
def get_kube_versions():
|
||||
"""Provides a list of supported kubernetes versions."""
|
||||
|
@ -446,3 +466,20 @@ class KubeOperator(object):
|
|||
version_states[active_candidates[0]] = KUBE_STATE_ACTIVE
|
||||
|
||||
return version_states
|
||||
|
||||
def kube_get_kubernetes_version(self):
|
||||
"""Returns the kubernetes version from the kubadm config map."""
|
||||
|
||||
c = self._get_kubernetesclient_core()
|
||||
|
||||
# Get the kubernetes version from the kubeadm config map
|
||||
config_map = c.read_namespaced_config_map('kubeadm-config',
|
||||
NAMESPACE_KUBE_SYSTEM)
|
||||
cluster_config = config_map.data['ClusterConfiguration']
|
||||
match = re.search('\nkubernetesVersion: (.*)\n', cluster_config)
|
||||
if match is None:
|
||||
LOG.error("Unable to find kubernetesVersion in kubeadm-config %s" %
|
||||
config_map)
|
||||
return None
|
||||
else:
|
||||
return match.group(1)
|
||||
|
|
|
@ -39,7 +39,6 @@ import os
|
|||
import re
|
||||
import shutil
|
||||
import socket
|
||||
import subprocess
|
||||
import tempfile
|
||||
import time
|
||||
import uuid
|
||||
|
@ -56,6 +55,8 @@ from cryptography.hazmat.backends import default_backend
|
|||
from cryptography.hazmat.primitives import serialization
|
||||
from cryptography.hazmat.primitives.asymmetric import rsa
|
||||
from eventlet import greenthread
|
||||
# Make subprocess module greenthread friendly
|
||||
from eventlet.green import subprocess
|
||||
from fm_api import constants as fm_constants
|
||||
from fm_api import fm_api
|
||||
from netaddr import IPAddress
|
||||
|
@ -92,6 +93,7 @@ from sysinv.conductor import kube_app
|
|||
from sysinv.conductor import openstack
|
||||
from sysinv.conductor import docker_registry
|
||||
from sysinv.db import api as dbapi
|
||||
from sysinv import objects
|
||||
from sysinv.objects import base as objects_base
|
||||
from sysinv.objects import kube_app as kubeapp_obj
|
||||
from sysinv.openstack.common import context as ctx
|
||||
|
@ -10683,3 +10685,223 @@ class ConductorManager(service.PeriodicService):
|
|||
constants.SYSINV_CONF_DEFAULT_PATH))
|
||||
|
||||
os.chmod(constants.SYSINV_CONF_DEFAULT_PATH, 0o400)
|
||||
|
||||
def kube_download_images(self, context, kube_version):
|
||||
"""Download the kubernetes images for this version"""
|
||||
|
||||
LOG.info("executing playbook: %s for version %s" %
|
||||
(constants.ANSIBLE_KUBE_PUSH_IMAGES_PLAYBOOK, kube_version))
|
||||
|
||||
proc = subprocess.Popen(
|
||||
['ansible-playbook', '-e', 'kubernetes_version=%s' % kube_version,
|
||||
constants.ANSIBLE_KUBE_PUSH_IMAGES_PLAYBOOK],
|
||||
stdout=subprocess.PIPE)
|
||||
out, _ = proc.communicate()
|
||||
|
||||
LOG.info("ansible-playbook: %s." % out)
|
||||
|
||||
if proc.returncode:
|
||||
LOG.warning("ansible-playbook returned an error: %s" %
|
||||
proc.returncode)
|
||||
new_state = kubernetes.KUBE_UPGRADE_FAILED
|
||||
else:
|
||||
new_state = kubernetes.KUBE_UPGRADE_STARTED
|
||||
|
||||
# Update the upgrade state
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = new_state
|
||||
kube_upgrade_obj.save()
|
||||
|
||||
def kube_upgrade_control_plane(self, context, host_uuid):
|
||||
"""Upgrade the kubernetes control plane on this host"""
|
||||
|
||||
host_obj = objects.host.get_by_uuid(context, host_uuid)
|
||||
host_name = host_obj.hostname
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
target_version = kube_host_upgrade_obj.target_version
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
|
||||
if kube_upgrade_obj.state == kubernetes.KUBE_UPGRADING_FIRST_MASTER:
|
||||
puppet_class = 'platform::kubernetes::upgrade_first_control_plane'
|
||||
new_state = kubernetes.KUBE_UPGRADED_FIRST_MASTER
|
||||
elif kube_upgrade_obj.state == kubernetes.KUBE_UPGRADING_SECOND_MASTER:
|
||||
puppet_class = 'platform::kubernetes::upgrade_control_plane'
|
||||
new_state = kubernetes.KUBE_UPGRADED_SECOND_MASTER
|
||||
else:
|
||||
LOG.error("Invalid state %s to upgrade control plane." %
|
||||
kube_upgrade_obj.state)
|
||||
return
|
||||
|
||||
# Update status
|
||||
kube_host_upgrade_obj.status = "Upgrading control plane"
|
||||
kube_host_upgrade_obj.save()
|
||||
|
||||
# Update the config for this host
|
||||
personalities = [host_obj.personality]
|
||||
config_uuid = self._config_update_hosts(context, personalities,
|
||||
[host_uuid])
|
||||
|
||||
# Apply the runtime manifest to upgrade the control plane
|
||||
config_dict = {
|
||||
"personalities": personalities,
|
||||
"host_uuids": [host_uuid],
|
||||
"classes": [puppet_class]
|
||||
}
|
||||
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
||||
|
||||
# Wait for the manifest to be applied
|
||||
elapsed = 0
|
||||
while elapsed < kubernetes.MANIFEST_APPLY_TIMEOUT:
|
||||
elapsed += kubernetes.MANIFEST_APPLY_INTERVAL
|
||||
greenthread.sleep(kubernetes.MANIFEST_APPLY_INTERVAL)
|
||||
host_obj = objects.host.get_by_uuid(context, host_uuid)
|
||||
if host_obj.config_target == host_obj.config_applied:
|
||||
LOG.info("Config was applied for host %s" % host_name)
|
||||
break
|
||||
LOG.debug("Waiting for config apply on host %s" % host_name)
|
||||
else:
|
||||
LOG.warning("Manifest apply failed for host %s" % host_name)
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
kube_host_upgrade_obj.status = "Control plane upgrade failed"
|
||||
kube_host_upgrade_obj.save()
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_FAILED
|
||||
kube_upgrade_obj.save()
|
||||
return
|
||||
|
||||
# Wait for the control plane pods to start with the new version
|
||||
kube_operator = kubernetes.KubeOperator()
|
||||
elapsed = 0
|
||||
while elapsed < kubernetes.POD_START_TIMEOUT:
|
||||
elapsed += kubernetes.POD_START_INTERVAL
|
||||
greenthread.sleep(kubernetes.POD_START_INTERVAL)
|
||||
cp_versions = kube_operator.kube_get_control_plane_versions()
|
||||
if cp_versions.get(host_name, None) == target_version:
|
||||
LOG.info("Control plane was updated for host %s" % host_name)
|
||||
break
|
||||
LOG.debug("Waiting for control plane update on host %s" % host_name)
|
||||
else:
|
||||
LOG.warning("Control plane upgrade failed for host %s" %
|
||||
host_name)
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
kube_host_upgrade_obj.status = "Control plane upgrade failed"
|
||||
kube_host_upgrade_obj.save()
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_FAILED
|
||||
kube_upgrade_obj.save()
|
||||
return
|
||||
|
||||
# The control plane update was successful
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
kube_host_upgrade_obj.status = None
|
||||
kube_host_upgrade_obj.save()
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = new_state
|
||||
kube_upgrade_obj.save()
|
||||
|
||||
def kube_upgrade_kubelet(self, context, host_uuid):
|
||||
"""Upgrade the kubernetes kubelet on this host"""
|
||||
|
||||
host_obj = objects.host.get_by_uuid(context, host_uuid)
|
||||
host_name = host_obj.hostname
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
target_version = kube_host_upgrade_obj.target_version
|
||||
|
||||
if host_obj.personality == constants.CONTROLLER:
|
||||
puppet_class = 'platform::kubernetes::master::upgrade_kubelet'
|
||||
elif host_obj.personality == constants.WORKER:
|
||||
puppet_class = 'platform::kubernetes::worker::upgrade_kubelet'
|
||||
else:
|
||||
LOG.error("Invalid personality %s to upgrade kubelet." %
|
||||
host_obj.personality)
|
||||
return
|
||||
|
||||
# Update status
|
||||
kube_host_upgrade_obj.status = "Upgrading kubelet"
|
||||
kube_host_upgrade_obj.save()
|
||||
|
||||
# Update the config for this host
|
||||
personalities = [host_obj.personality]
|
||||
config_uuid = self._config_update_hosts(context, personalities,
|
||||
[host_uuid])
|
||||
|
||||
# Apply the runtime manifest to upgrade the kubelet
|
||||
config_dict = {
|
||||
"personalities": personalities,
|
||||
"host_uuids": [host_uuid],
|
||||
"classes": [puppet_class]
|
||||
}
|
||||
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
|
||||
|
||||
# Wait for the manifest to be applied
|
||||
elapsed = 0
|
||||
while elapsed < kubernetes.MANIFEST_APPLY_TIMEOUT:
|
||||
elapsed += kubernetes.MANIFEST_APPLY_INTERVAL
|
||||
greenthread.sleep(kubernetes.MANIFEST_APPLY_INTERVAL)
|
||||
host_obj = objects.host.get_by_uuid(context, host_uuid)
|
||||
if host_obj.config_target == host_obj.config_applied:
|
||||
LOG.info("Config was applied for host %s" % host_name)
|
||||
break
|
||||
LOG.debug("Waiting for config apply on host %s" % host_name)
|
||||
else:
|
||||
LOG.warning("Manifest apply failed for host %s" % host_name)
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
kube_host_upgrade_obj.status = "Control plane upgrade failed"
|
||||
kube_host_upgrade_obj.save()
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_FAILED
|
||||
kube_upgrade_obj.save()
|
||||
return
|
||||
|
||||
# Wait for the kubelet to start with the new version
|
||||
kube_operator = kubernetes.KubeOperator()
|
||||
elapsed = 0
|
||||
while elapsed < kubernetes.POD_START_TIMEOUT:
|
||||
elapsed += kubernetes.POD_START_INTERVAL
|
||||
greenthread.sleep(kubernetes.POD_START_INTERVAL)
|
||||
kubelet_versions = kube_operator.kube_get_kubelet_versions()
|
||||
if kubelet_versions.get(host_name, None) == target_version:
|
||||
LOG.info("Kubelet was updated for host %s" % host_name)
|
||||
break
|
||||
LOG.debug("Waiting for kubelet update on host %s" % host_name)
|
||||
else:
|
||||
LOG.warning("Kubelet upgrade failed for host %s" % host_name)
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
kube_host_upgrade_obj.status = "Kubelet upgrade failed"
|
||||
kube_host_upgrade_obj.save()
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_FAILED
|
||||
kube_upgrade_obj.save()
|
||||
return
|
||||
|
||||
# The kubelet update was successful
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
context, host_obj.id)
|
||||
kube_host_upgrade_obj.status = None
|
||||
kube_host_upgrade_obj.save()
|
||||
|
||||
# Check whether the upgrade is complete
|
||||
version_states = kube_operator.kube_get_version_states()
|
||||
if version_states.get(target_version, None) == \
|
||||
kubernetes.KUBE_STATE_ACTIVE:
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_COMPLETE
|
||||
kube_upgrade_obj.save()
|
||||
|
||||
def kube_upgrade_networking(self, context, kube_version):
|
||||
"""Upgrade kubernetes networking for this kubernetes version"""
|
||||
|
||||
# TODO: Upgrade kubernetes networking.
|
||||
LOG.info("Upgrade kubernetes networking here")
|
||||
|
||||
# Indicate that networking upgrade is complete
|
||||
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
|
||||
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADED_NETWORKING
|
||||
kube_upgrade_obj.save()
|
||||
|
|
|
@ -1821,3 +1821,43 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
|
|||
:return:
|
||||
"""
|
||||
return self.call(context, self.make_msg('store_default_config'))
|
||||
|
||||
def kube_download_images(self, context, kube_version):
|
||||
"""Asynchronously, have the conductor download the kubernetes images
|
||||
for this new version.
|
||||
|
||||
:param context: request context
|
||||
:param kube_version: kubernetes version to download
|
||||
"""
|
||||
return self.cast(context, self.make_msg('kube_download_images',
|
||||
kube_version=kube_version))
|
||||
|
||||
def kube_upgrade_control_plane(self, context, host_uuid):
|
||||
"""Asynchronously, have the conductor upgrade the kubernetes control
|
||||
plane on this host.
|
||||
|
||||
:param context: request context
|
||||
:param host_uuid: uuid or id of the host
|
||||
"""
|
||||
return self.cast(context, self.make_msg(
|
||||
'kube_upgrade_control_plane', host_uuid=host_uuid))
|
||||
|
||||
def kube_upgrade_kubelet(self, context, host_uuid):
|
||||
"""Asynchronously, have the conductor upgrade the kubernetes kubelet
|
||||
plane on this host.
|
||||
|
||||
:param context: request context
|
||||
:param host_uuid: uuid or id of the host
|
||||
"""
|
||||
return self.cast(context, self.make_msg('kube_upgrade_kubelet',
|
||||
host_uuid=host_uuid))
|
||||
|
||||
def kube_upgrade_networking(self, context, kube_version):
|
||||
"""Asynchronously, have the conductor upgrade networking for this
|
||||
new version.
|
||||
|
||||
:param context: request context
|
||||
:param kube_version: kubernetes version being upgraded to
|
||||
"""
|
||||
return self.cast(context, self.make_msg('kube_upgrade_networking',
|
||||
kube_version=kube_version))
|
||||
|
|
|
@ -1269,6 +1269,7 @@ class Connection(api.Connection):
|
|||
except db_exc.DBDuplicateEntry:
|
||||
raise exception.NodeAlreadyExists(uuid=values['uuid'])
|
||||
self._host_upgrade_create(host.id, software_load)
|
||||
self._kube_host_upgrade_create(host.id)
|
||||
return self._host_get(values['uuid'])
|
||||
|
||||
@objects.objectify(objects.host)
|
||||
|
@ -8091,8 +8092,9 @@ class Connection(api.Connection):
|
|||
|
||||
return result
|
||||
|
||||
@objects.objectify(objects.kube_host_upgrade)
|
||||
def kube_host_upgrade_create(self, host_id, values):
|
||||
def _kube_host_upgrade_create(self, host_id, values=None):
|
||||
if values is None:
|
||||
values = dict()
|
||||
if not values.get('uuid'):
|
||||
values['uuid'] = uuidutils.generate_uuid()
|
||||
values['host_id'] = int(host_id)
|
||||
|
@ -8105,9 +8107,12 @@ class Connection(api.Connection):
|
|||
except db_exc.DBDuplicateEntry:
|
||||
raise exception.KubeHostUpgradeAlreadyExists(
|
||||
uuid=values['uuid'], host=host_id)
|
||||
|
||||
return self._kube_host_upgrade_get(values['uuid'])
|
||||
|
||||
@objects.objectify(objects.kube_host_upgrade)
|
||||
def kube_host_upgrade_create(self, host_id, values):
|
||||
return self._kube_host_upgrade_create(host_id, values)
|
||||
|
||||
@objects.objectify(objects.kube_host_upgrade)
|
||||
def kube_host_upgrade_get(self, host_upgrade_id):
|
||||
return self._kube_host_upgrade_get(host_upgrade_id)
|
||||
|
@ -8115,8 +8120,15 @@ class Connection(api.Connection):
|
|||
@objects.objectify(objects.kube_host_upgrade)
|
||||
def kube_host_upgrade_get_list(self, limit=None, marker=None,
|
||||
sort_key=None, sort_dir=None):
|
||||
query = model_query(models.KubeHostUpgrade)
|
||||
# Only retrieve host upgrade records associated with actual hosts
|
||||
# (not host profiles).
|
||||
query = query.join(models.ihost,
|
||||
models.KubeHostUpgrade.host_id == models.ihost.id)
|
||||
query = query.filter(models.ihost.recordtype == "standard")
|
||||
|
||||
return _paginate_query(models.KubeHostUpgrade, limit, marker,
|
||||
sort_key, sort_dir)
|
||||
sort_key, sort_dir, query)
|
||||
|
||||
@objects.objectify(objects.kube_host_upgrade)
|
||||
def kube_host_upgrade_get_by_host(self, host_id):
|
||||
|
@ -8173,8 +8185,8 @@ class Connection(api.Connection):
|
|||
return self._kube_upgrade_get(values['uuid'])
|
||||
|
||||
@objects.objectify(objects.kube_upgrade)
|
||||
def kube_upgrade_get(self, server):
|
||||
return self._kube_upgrade_get(server)
|
||||
def kube_upgrade_get(self, upgrade_id):
|
||||
return self._kube_upgrade_get(upgrade_id)
|
||||
|
||||
@objects.objectify(objects.kube_upgrade)
|
||||
def kube_upgrade_get_one(self):
|
||||
|
|
|
@ -21,9 +21,6 @@ def upgrade(migrate_engine):
|
|||
meta = MetaData()
|
||||
meta.bind = migrate_engine
|
||||
|
||||
meta = MetaData()
|
||||
meta.bind = migrate_engine
|
||||
|
||||
kube_upgrade = Table(
|
||||
'kube_upgrade',
|
||||
meta,
|
||||
|
|
|
@ -1742,8 +1742,6 @@ class KubeHostUpgrade(Base):
|
|||
|
||||
host_id = Column(Integer, ForeignKey('i_host.id', ondelete='CASCADE'))
|
||||
|
||||
host = relationship("ihost", lazy="joined", join_depth=1)
|
||||
|
||||
|
||||
class KubeUpgrade(Base):
|
||||
__tablename__ = 'kube_upgrade'
|
||||
|
|
|
@ -21,18 +21,19 @@ class KubeHostUpgrade(base.SysinvObject):
|
|||
'id': int,
|
||||
'uuid': utils.str_or_none,
|
||||
|
||||
'target_version': utils.int_or_none,
|
||||
'target_version': utils.str_or_none,
|
||||
'status': utils.str_or_none,
|
||||
'control_plane_version': utils.str_or_none, # Not stored in DB
|
||||
'kubelet_version': utils.str_or_none, # Not stored in DB
|
||||
'reserved_1': utils.str_or_none,
|
||||
'reserved_2': utils.str_or_none,
|
||||
'reserved_3': utils.str_or_none,
|
||||
'reserved_4': utils.str_or_none,
|
||||
|
||||
'host_id': int,
|
||||
'host_uuid': utils.str_or_none,
|
||||
}
|
||||
|
||||
_foreign_fields = {'host_uuid': 'host:uuid'}
|
||||
_optional_fields = ['control_plane_version', 'kubelet_version']
|
||||
|
||||
@base.remotable_classmethod
|
||||
def get_by_uuid(cls, context, uuid):
|
||||
|
|
|
@ -22,7 +22,7 @@ class KubeUpgrade(base.SysinvObject):
|
|||
'uuid': utils.str_or_none,
|
||||
|
||||
'from_version': utils.str_or_none,
|
||||
'to_version': utils.int_or_none,
|
||||
'to_version': utils.str_or_none,
|
||||
'state': utils.str_or_none,
|
||||
'reserved_1': utils.str_or_none,
|
||||
'reserved_2': utils.str_or_none,
|
||||
|
@ -34,6 +34,10 @@ class KubeUpgrade(base.SysinvObject):
|
|||
def get_by_uuid(cls, context, uuid):
|
||||
return cls.dbapi.kube_upgrade_get(uuid)
|
||||
|
||||
@base.remotable_classmethod
|
||||
def get_one(cls, context):
|
||||
return cls.dbapi.kube_upgrade_get_one()
|
||||
|
||||
def save_changes(self, context, updates):
|
||||
self.dbapi.kube_upgrade_update(self.uuid, # pylint: disable=no-member
|
||||
updates)
|
||||
|
|
|
@ -14,7 +14,9 @@ import subprocess
|
|||
from oslo_log import log as logging
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.common import utils
|
||||
from sysinv import objects
|
||||
from sysinv.puppet import base
|
||||
from sysinv.puppet import interface
|
||||
|
||||
|
@ -29,6 +31,10 @@ class KubernetesPuppet(base.BasePuppet):
|
|||
"""Class to encapsulate puppet operations for kubernetes configuration"""
|
||||
ETCD_SERVICE_PORT = '2379'
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(KubernetesPuppet, self).__init__(*args, **kwargs)
|
||||
self._kube_operator = kubernetes.KubeOperator()
|
||||
|
||||
def get_system_config(self):
|
||||
config = {}
|
||||
config.update(
|
||||
|
@ -95,6 +101,9 @@ class KubernetesPuppet(base.BasePuppet):
|
|||
# Generate the token and join command for this host.
|
||||
config.update(self._get_host_join_command(host))
|
||||
|
||||
# Get the kubernetes version for this host
|
||||
config.update(self._get_kubernetes_version(host))
|
||||
|
||||
return config
|
||||
|
||||
def _get_host_join_command(self, host):
|
||||
|
@ -151,6 +160,28 @@ class KubernetesPuppet(base.BasePuppet):
|
|||
subnet = netaddr.IPNetwork(self._get_cluster_service_subnet())
|
||||
return str(subnet[CLUSTER_SERVICE_DNS_IP_OFFSET])
|
||||
|
||||
def _get_kubernetes_version(self, host):
|
||||
config = {}
|
||||
|
||||
# Get the kubernetes upgrade record for this host
|
||||
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
|
||||
self.context, host.id)
|
||||
version = kube_host_upgrade_obj.target_version
|
||||
if version is None:
|
||||
# The target version is not set if an upgrade hasn't been started,
|
||||
# so get the running kubernetes version.
|
||||
try:
|
||||
version = self._kube_operator.kube_get_kubernetes_version()
|
||||
except Exception:
|
||||
# During initial installation of the first controller,
|
||||
# kubernetes may not be running yet. In that case, none of the
|
||||
# puppet manifests being applied will need the kubernetes
|
||||
# version.
|
||||
LOG.warning("Unable to retrieve kubernetes version")
|
||||
|
||||
config.update({'platform::kubernetes::params::version': version})
|
||||
return config
|
||||
|
||||
def _get_host_node_config(self, host):
|
||||
node_ip = self._get_address_by_name(
|
||||
host.hostname, constants.NETWORK_TYPE_MGMT).address
|
||||
|
|
|
@ -16,6 +16,7 @@ import webtest.app
|
|||
from six.moves import http_client
|
||||
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.openstack.common import uuidutils
|
||||
|
||||
from sysinv.tests.api import base
|
||||
|
@ -36,6 +37,8 @@ class FakeConductorAPI(object):
|
|||
self.evaluate_app_reapply = mock.MagicMock()
|
||||
self.update_clock_synchronization_config = mock.MagicMock()
|
||||
self.store_default_config = mock.MagicMock()
|
||||
self.kube_upgrade_control_plane = mock.MagicMock()
|
||||
self.kube_upgrade_kubelet = mock.MagicMock()
|
||||
|
||||
def create_ihost(self, context, values):
|
||||
# Create the host in the DB as the code under test expects this
|
||||
|
@ -275,6 +278,734 @@ class TestPostWorkerMixin(object):
|
|||
self.assertEqual(ndict['serialid'], result['serialid'])
|
||||
|
||||
|
||||
class TestPostKubeUpgrades(TestHost):
|
||||
|
||||
def setUp(self):
|
||||
super(TestPostKubeUpgrades, self).setUp()
|
||||
|
||||
# Mock the KubeOperator
|
||||
self.kube_get_control_plane_versions_result = {
|
||||
'controller-0': 'v1.42.1',
|
||||
'controller-1': 'v1.42.1',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
def mock_kube_get_control_plane_versions(obj):
|
||||
return self.kube_get_control_plane_versions_result
|
||||
self.mocked_kube_get_control_plane_versions = mock.patch(
|
||||
'sysinv.common.kubernetes.KubeOperator.kube_get_control_plane_versions',
|
||||
mock_kube_get_control_plane_versions)
|
||||
self.mocked_kube_get_control_plane_versions.start()
|
||||
self.addCleanup(self.mocked_kube_get_control_plane_versions.stop)
|
||||
|
||||
self.kube_get_kubelet_versions_result = {
|
||||
'controller-0': 'v1.42.1',
|
||||
'controller-1': 'v1.42.1',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
def mock_kube_get_kubelet_versions(obj):
|
||||
return self.kube_get_kubelet_versions_result
|
||||
self.mocked_kube_get_kubelet_versions = mock.patch(
|
||||
'sysinv.common.kubernetes.KubeOperator.kube_get_kubelet_versions',
|
||||
mock_kube_get_kubelet_versions)
|
||||
self.mocked_kube_get_kubelet_versions.start()
|
||||
self.addCleanup(self.mocked_kube_get_kubelet_versions.stop)
|
||||
|
||||
# Mock the KubeVersion
|
||||
self.get_kube_versions_result = [
|
||||
{'version': 'v1.42.1',
|
||||
'upgrade_from': [],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.2',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
]
|
||||
|
||||
def mock_get_kube_versions():
|
||||
return self.get_kube_versions_result
|
||||
self.mocked_get_kube_versions = mock.patch(
|
||||
'sysinv.common.kubernetes.get_kube_versions',
|
||||
mock_get_kube_versions)
|
||||
self.mocked_get_kube_versions.start()
|
||||
self.addCleanup(self.mocked_get_kube_versions.stop)
|
||||
|
||||
# Mock the patching API
|
||||
self.mock_patch_is_applied_result = True
|
||||
|
||||
def mock_patch_is_applied(token, timeout, region_name, patches):
|
||||
return self.mock_patch_is_applied_result
|
||||
self.mocked_patch_is_applied = mock.patch(
|
||||
'sysinv.api.controllers.v1.patch_api.patch_is_applied',
|
||||
mock_patch_is_applied)
|
||||
self.mocked_patch_is_applied.start()
|
||||
self.addCleanup(self.mocked_patch_is_applied.stop)
|
||||
|
||||
def test_kube_upgrade_control_plane_controller_0(self):
|
||||
# Test upgrading kubernetes control plane on controller-0
|
||||
|
||||
# Create controller-0
|
||||
c0 = self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
kube_upgrade = dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
# Upgrade the control plane
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_control_plane',
|
||||
body, headers={'User-Agent': 'sysinv-test'})
|
||||
|
||||
# Verify the host was returned
|
||||
self.assertEqual(result.json['hostname'], 'controller-0')
|
||||
|
||||
# Verify the control plane was upgraded
|
||||
self.fake_conductor_api.kube_upgrade_control_plane.\
|
||||
assert_called_with(mock.ANY, c0.uuid)
|
||||
|
||||
# Verify that the target version was updated
|
||||
result = self.get_json('/kube_host_upgrades/1')
|
||||
self.assertEqual(result['target_version'], 'v1.42.2')
|
||||
|
||||
# Verify that the upgrade state was updated
|
||||
result = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid)
|
||||
self.assertEqual(result['state'],
|
||||
kubernetes.KUBE_UPGRADING_FIRST_MASTER)
|
||||
|
||||
def test_kube_upgrade_control_plane_controller_1(self):
|
||||
# Test upgrading kubernetes control plane on controller-1
|
||||
|
||||
# Create controllers
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
c1 = self._create_controller_1(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
kube_upgrade = dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
# Upgrade the control plane
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-1/kube_upgrade_control_plane',
|
||||
body, headers={'User-Agent': 'sysinv-test'})
|
||||
|
||||
# Verify the host was returned
|
||||
self.assertEqual(result.json['hostname'], 'controller-1')
|
||||
|
||||
# Verify the control plane was upgraded
|
||||
self.fake_conductor_api.kube_upgrade_control_plane.\
|
||||
assert_called_with(mock.ANY, c1.uuid)
|
||||
|
||||
# Verify that the target version was updated
|
||||
result = self.get_json('/kube_host_upgrades/2')
|
||||
self.assertEqual(result['target_version'], 'v1.42.2')
|
||||
|
||||
# Verify that the upgrade state was updated
|
||||
result = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid)
|
||||
self.assertEqual(result['state'],
|
||||
kubernetes.KUBE_UPGRADING_FIRST_MASTER)
|
||||
|
||||
def test_kube_upgrade_control_plane_second_controller(self):
|
||||
# Test upgrading kubernetes control plane on the second controller
|
||||
|
||||
# Create controllers
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
c1 = self._create_controller_1(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
kube_upgrade = dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADED_NETWORKING,
|
||||
)
|
||||
|
||||
# Upgrade the control plane
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-1/kube_upgrade_control_plane',
|
||||
body, headers={'User-Agent': 'sysinv-test'})
|
||||
|
||||
# Verify the host was returned
|
||||
self.assertEqual(result.json['hostname'], 'controller-1')
|
||||
|
||||
# Verify the control plane was upgraded
|
||||
self.fake_conductor_api.kube_upgrade_control_plane.\
|
||||
assert_called_with(mock.ANY, c1.uuid)
|
||||
|
||||
# Verify that the target version was updated
|
||||
result = self.get_json('/kube_host_upgrades/2')
|
||||
self.assertEqual(result['target_version'], 'v1.42.2')
|
||||
|
||||
# Verify that the upgrade state was updated
|
||||
result = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid)
|
||||
self.assertEqual(result['state'],
|
||||
kubernetes.KUBE_UPGRADING_SECOND_MASTER)
|
||||
|
||||
def test_kube_upgrade_control_plane_no_upgrade(self):
|
||||
# Test upgrading kubernetes control plane with no upgrade
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Upgrade the control plane
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_control_plane',
|
||||
{}, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("upgrade is not in progress",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_control_plane_wrong_upgrade_state(self):
|
||||
# Test upgrading kubernetes control plane with wrong upgrade state
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADING_KUBELETS,
|
||||
)
|
||||
|
||||
# Upgrade the control plane
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_control_plane',
|
||||
{}, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("upgrade must be in the",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_control_plane_wrong_personality(self):
|
||||
# Test upgrading kubernetes control plane with wrong personality
|
||||
|
||||
# Create hosts
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
self._create_worker(
|
||||
mgmt_ip='192.168.204.5',
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
# Upgrade the control plane
|
||||
result = self.post_json(
|
||||
'/ihosts/worker-0/kube_upgrade_control_plane',
|
||||
{}, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("does not have a kubernetes control plane",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_control_plane_missing_version(self):
|
||||
# Test upgrading kubernetes control plane with no control plane version
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
# No control plane version for this controller
|
||||
self.kube_get_control_plane_versions_result = {
|
||||
'controller-1': 'v1.42.1',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
# Upgrade the control plane
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_control_plane',
|
||||
{}, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("Unable to determine the version",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_control_plane_wrong_host_state(self):
|
||||
# Test upgrading kubernetes control plane with wrong host state
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_DISABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
# Upgrade the control plane
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_control_plane',
|
||||
{}, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("must be unlocked and available",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_0(self):
|
||||
# Test upgrading kubernetes kubelet on controller-0
|
||||
|
||||
# Create controller-0
|
||||
c0 = self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_DISABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
kube_upgrade = dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
|
||||
)
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'})
|
||||
|
||||
# Verify the host was returned
|
||||
self.assertEqual(result.json['hostname'], 'controller-0')
|
||||
|
||||
# Verify the kubelet was upgraded
|
||||
self.fake_conductor_api.kube_upgrade_kubelet.\
|
||||
assert_called_with(mock.ANY, c0.uuid)
|
||||
|
||||
# Verify that the upgrade state was updated
|
||||
result = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid)
|
||||
self.assertEqual(result['state'],
|
||||
kubernetes.KUBE_UPGRADING_KUBELETS)
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_1(self):
|
||||
# Test upgrading kubernetes kubelet on controller-1
|
||||
|
||||
# Create controllers
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
c1 = self._create_controller_1(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_DISABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
kube_upgrade = dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
|
||||
)
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-1/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'})
|
||||
|
||||
# Verify the host was returned
|
||||
self.assertEqual(result.json['hostname'], 'controller-1')
|
||||
|
||||
# Verify the kubelet was upgraded
|
||||
self.fake_conductor_api.kube_upgrade_kubelet.\
|
||||
assert_called_with(mock.ANY, c1.uuid)
|
||||
|
||||
# Verify that the upgrade state was updated
|
||||
result = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid)
|
||||
self.assertEqual(result['state'],
|
||||
kubernetes.KUBE_UPGRADING_KUBELETS)
|
||||
|
||||
def test_kube_upgrade_kubelet_worker(self):
|
||||
# Test upgrading kubernetes kubelet on worker
|
||||
|
||||
# Create hosts
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
self._create_controller_1(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
w0 = self._create_worker(
|
||||
mgmt_ip='192.168.204.5',
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
kube_upgrade = dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADING_KUBELETS,
|
||||
)
|
||||
|
||||
# Indicate kubelets on controllers have been upgraded
|
||||
self.kube_get_kubelet_versions_result = {
|
||||
'controller-0': 'v1.42.2',
|
||||
'controller-1': 'v1.42.2',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/worker-0/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'})
|
||||
|
||||
# Verify the host was returned
|
||||
self.assertEqual(result.json['hostname'], 'worker-0')
|
||||
|
||||
# Verify that the target version was updated
|
||||
result = self.get_json('/kube_host_upgrades/3')
|
||||
self.assertEqual(result['target_version'], 'v1.42.2')
|
||||
|
||||
# Verify the kubelet was upgraded
|
||||
self.fake_conductor_api.kube_upgrade_kubelet.\
|
||||
assert_called_with(mock.ANY, w0.uuid)
|
||||
|
||||
# Verify that the upgrade state was updated
|
||||
result = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid)
|
||||
self.assertEqual(result['state'],
|
||||
kubernetes.KUBE_UPGRADING_KUBELETS)
|
||||
|
||||
def test_kube_upgrade_kubelet_no_upgrade(self):
|
||||
# Test upgrading kubernetes kubelet on controller-0 with no upgrade
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_DISABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Upgrade the kubelet
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_kubelet',
|
||||
{}, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("upgrade is not in progress",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_kubelet_no_kubelet(self):
|
||||
# Test upgrading kubernetes kubelet where there is no kubelet
|
||||
|
||||
# Create storage-0
|
||||
self._create_test_host(
|
||||
personality=constants.STORAGE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
|
||||
)
|
||||
|
||||
# Upgrade the kubelet
|
||||
result = self.post_json(
|
||||
'/ihosts/storage-0/kube_upgrade_kubelet',
|
||||
{}, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("host does not have a kubelet",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_0_wrong_upgrade_state(self):
|
||||
# Test upgrading kubernetes kubelet on controller-0 with upgrade in
|
||||
# the wrong state.
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_DISABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("upgrade must be in the",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_0_missing_patches(self):
|
||||
# Test upgrading kubernetes kubelet on controller-0 with missing
|
||||
# patches.
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_DISABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
|
||||
)
|
||||
|
||||
# Fake the missing patches
|
||||
self.mock_patch_is_applied_result = False
|
||||
self.get_kube_versions_result = [
|
||||
{'version': 'v1.42.1',
|
||||
'upgrade_from': [],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.2',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': ['MISSING_PATCH.1', 'MISSING_PATCH.2'],
|
||||
},
|
||||
]
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("The following patches must be applied",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_kubelet_worker_wrong_order(self):
|
||||
# Test upgrading kubernetes kubelet on worker before controllers
|
||||
|
||||
# Create hosts
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
self._create_controller_1(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
self._create_worker(
|
||||
mgmt_ip='192.168.204.5',
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADING_KUBELETS,
|
||||
)
|
||||
|
||||
# Indicate kubelets on controllers have not been upgraded
|
||||
self.kube_get_kubelet_versions_result = {
|
||||
'controller-0': 'v1.42.1',
|
||||
'controller-1': 'v1.42.1',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/worker-0/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("The kubelets on all controller hosts must be upgraded",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_0_missing_kubelet(self):
|
||||
# Test upgrading kubernetes kubelet on controller-0 with kubelet
|
||||
# version missing.
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_LOCKED,
|
||||
operational=constants.OPERATIONAL_DISABLED,
|
||||
availability=constants.AVAILABILITY_ONLINE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
|
||||
)
|
||||
|
||||
# No kubelet version for controller-0
|
||||
self.kube_get_kubelet_versions_result = {
|
||||
'controller-1': 'v1.42.1',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("Unable to determine the version of the kubelet",
|
||||
result.json['error_message'])
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_0_wrong_host_state(self):
|
||||
# Test upgrading kubernetes kubelet on controller-0 with controller
|
||||
# in the wrong state.
|
||||
|
||||
# Create controller-0
|
||||
self._create_controller_0(
|
||||
invprovision=constants.PROVISIONED,
|
||||
administrative=constants.ADMIN_UNLOCKED,
|
||||
operational=constants.OPERATIONAL_ENABLED,
|
||||
availability=constants.AVAILABILITY_AVAILABLE)
|
||||
|
||||
# Create the upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.42.1',
|
||||
to_version='v1.42.2',
|
||||
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
|
||||
)
|
||||
|
||||
# Upgrade the kubelet
|
||||
body = {}
|
||||
result = self.post_json(
|
||||
'/ihosts/controller-0/kube_upgrade_kubelet',
|
||||
body, headers={'User-Agent': 'sysinv-test'},
|
||||
expect_errors=True)
|
||||
|
||||
# Verify the failure
|
||||
self.assertEqual(result.content_type, 'application/json')
|
||||
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
|
||||
self.assertTrue(result.json['error_message'])
|
||||
self.assertIn("The host must be locked and online",
|
||||
result.json['error_message'])
|
||||
|
||||
|
||||
class TestDelete(TestHost):
|
||||
|
||||
def test_delete_host(self):
|
||||
|
|
|
@ -0,0 +1,264 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
"""
|
||||
Tests for the API /kube_host_upgrades/ methods.
|
||||
"""
|
||||
|
||||
import mock
|
||||
|
||||
from sysinv.common import constants
|
||||
from sysinv.openstack.common import uuidutils
|
||||
|
||||
from sysinv.tests.api import base
|
||||
from sysinv.tests.db import base as dbbase
|
||||
from sysinv.tests.db import utils as dbutils
|
||||
|
||||
|
||||
class TestKubeHostUpgrade(base.FunctionalTest, dbbase.BaseHostTestCase):
    """Base test case with the KubeOperator version queries stubbed out.

    Subclasses can change the canned results by assigning to
    kube_get_control_plane_versions_result / kube_get_kubelet_versions_result.
    """

    def setUp(self):
        super(TestKubeHostUpgrade, self).setUp()

        # Canned control plane versions returned by the mocked KubeOperator.
        self.kube_get_control_plane_versions_result = {
            'controller-0': 'v1.42.1',
            'controller-1': 'v1.42.1'}

        def fake_control_plane_versions(obj):
            return self.kube_get_control_plane_versions_result
        self.mocked_kube_get_control_plane_versions = mock.patch(
            'sysinv.common.kubernetes.KubeOperator.kube_get_control_plane_versions',
            fake_control_plane_versions)
        self.mocked_kube_get_control_plane_versions.start()
        self.addCleanup(self.mocked_kube_get_control_plane_versions.stop)

        # Canned kubelet versions returned by the mocked KubeOperator.
        self.kube_get_kubelet_versions_result = {
            'controller-0': 'v1.42.2',
            'controller-1': 'v1.42.2',
            'worker-0': 'v1.42.2'}

        def fake_kubelet_versions(obj):
            return self.kube_get_kubelet_versions_result
        self.mocked_kube_get_kubelet_versions = mock.patch(
            'sysinv.common.kubernetes.KubeOperator.kube_get_kubelet_versions',
            fake_kubelet_versions)
        self.mocked_kube_get_kubelet_versions.start()
        self.addCleanup(self.mocked_kube_get_kubelet_versions.stop)

    def _create_controller_0(self, subfunction=None, numa_nodes=1, **kw):
        """Create and return a controller-0 test host."""
        return self._create_test_host(
            personality=constants.CONTROLLER,
            subfunction=subfunction,
            numa_nodes=numa_nodes,
            unit=0,
            **kw)

    def _create_controller_1(self, subfunction=None, numa_nodes=1, **kw):
        """Create and return a controller-1 test host."""
        return self._create_test_host(
            personality=constants.CONTROLLER,
            subfunction=subfunction,
            numa_nodes=numa_nodes,
            unit=1,
            **kw)

    def _create_worker(self, unit=0, numa_nodes=1, **kw):
        """Create and return a worker test host."""
        return self._create_test_host(
            personality=constants.WORKER,
            numa_nodes=numa_nodes,
            unit=unit,
            **kw)

    def _create_storage(self, unit=0, numa_nodes=1, **kw):
        """Create and return a storage test host."""
        return self._create_test_host(
            personality=constants.STORAGE,
            numa_nodes=numa_nodes,
            unit=unit,
            **kw)
||||
class TestListKubeHostUpgrade(TestKubeHostUpgrade):
    """Tests for GET /kube_host_upgrades list and show operations."""

    def test_empty_host(self):
        # With no hosts there are no kube_host_upgrade entries.
        self.kube_get_control_plane_versions_result = dict()
        self.kube_get_kubelet_versions_result = dict()
        data = self.get_json('/kube_host_upgrades')
        self.assertEqual([], data['kube_host_upgrades'])

    def test_one(self):
        # Creating a host implicitly creates its kube_host_upgrade entry.
        self._create_controller_0()

        result = self.get_json('/kube_host_upgrades/1')

        self.assertIn('id', result)
        # Use assertTrue rather than a bare assert statement: bare asserts
        # are stripped under "python -O" and give no failure detail.
        self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
        self.assertEqual(result['target_version'], None)
        self.assertEqual(result['status'], None)
        self.assertEqual(result['control_plane_version'], 'v1.42.1')
        self.assertEqual(result['kubelet_version'], 'v1.42.2')
        self.assertEqual(result['host_id'], 1)

        # Hidden (reserved) attributes must not be exposed by the API.
        self.assertNotIn('reserved_1', result)
        self.assertNotIn('reserved_2', result)
        self.assertNotIn('reserved_3', result)
        self.assertNotIn('reserved_4', result)

    def test_one_no_dynamic_info(self):
        # A host absent from the mocked version maps gets placeholder values.
        self._create_worker(unit=42)

        result = self.get_json('/kube_host_upgrades/1')

        self.assertIn('id', result)
        self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
        self.assertEqual(result['target_version'], None)
        self.assertEqual(result['status'], None)
        self.assertEqual(result['control_plane_version'], 'N/A')
        self.assertEqual(result['kubelet_version'], 'unknown')
        self.assertEqual(result['host_id'], 1)

    def test_one_no_kubernetes(self):
        # A storage host runs no kubernetes components at all.
        self._create_storage()

        result = self.get_json('/kube_host_upgrades/1')

        self.assertIn('id', result)
        self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
        self.assertEqual(result['target_version'], None)
        self.assertEqual(result['status'], None)
        self.assertEqual(result['control_plane_version'], 'N/A')
        self.assertEqual(result['kubelet_version'], 'N/A')
        self.assertEqual(result['host_id'], 1)

    def test_all(self):
        # Every host gets a kube_host_upgrade entry.
        self._create_controller_0()
        self._create_controller_1()
        worker = self._create_worker(mgmt_ip='192.168.24.12')
        data = self.get_json('/kube_host_upgrades')
        self.assertEqual(3, len(data['kube_host_upgrades']))
        host_id = 1
        for upgrade in data['kube_host_upgrades']:
            self.assertIn('id', upgrade)
            self.assertTrue(uuidutils.is_uuid_like(upgrade['uuid']))
            self.assertEqual(upgrade['target_version'], None)
            self.assertEqual(upgrade['status'], None)
            if upgrade['host_id'] == worker.id:
                # Workers have no control plane.
                self.assertEqual(upgrade['control_plane_version'], 'N/A')
            else:
                self.assertEqual(upgrade['control_plane_version'], 'v1.42.1')
            self.assertEqual(upgrade['kubelet_version'], 'v1.42.2')
            self.assertEqual(upgrade['host_id'], host_id)
            host_id += 1

    def test_all_ignore_profile(self):
        # Profile records must not produce kube_host_upgrade entries.
        self._create_controller_0()
        self._create_controller_1()
        worker = self._create_worker(mgmt_ip='192.168.24.12')
        self._create_worker(mgmt_ip='192.168.24.13',
                            unit=1,
                            recordtype='profile')
        data = self.get_json('/kube_host_upgrades')
        self.assertEqual(3, len(data['kube_host_upgrades']))
        host_id = 1
        for upgrade in data['kube_host_upgrades']:
            self.assertIn('id', upgrade)
            self.assertTrue(uuidutils.is_uuid_like(upgrade['uuid']))
            self.assertEqual(upgrade['target_version'], None)
            self.assertEqual(upgrade['status'], None)
            if upgrade['host_id'] == worker.id:
                self.assertEqual(upgrade['control_plane_version'], 'N/A')
            else:
                self.assertEqual(upgrade['control_plane_version'], 'v1.42.1')
            self.assertEqual(upgrade['kubelet_version'], 'v1.42.2')
            self.assertEqual(upgrade['host_id'], host_id)
            host_id += 1

    def test_all_no_dynamic_info(self):
        # Hosts missing from the mocked version maps get placeholder values.
        self._create_controller_0()
        self._create_controller_1()
        worker = self._create_worker(mgmt_ip='192.168.24.12')
        worker_42 = self._create_worker(unit=42, mgmt_ip='192.168.24.13')
        data = self.get_json('/kube_host_upgrades')
        self.assertEqual(4, len(data['kube_host_upgrades']))
        host_id = 1
        for upgrade in data['kube_host_upgrades']:
            self.assertIn('id', upgrade)
            self.assertTrue(uuidutils.is_uuid_like(upgrade['uuid']))
            self.assertEqual(upgrade['target_version'], None)
            self.assertEqual(upgrade['status'], None)
            if upgrade['host_id'] == worker_42.id:
                self.assertEqual(upgrade['control_plane_version'], 'N/A')
                self.assertEqual(upgrade['kubelet_version'], 'unknown')
            elif upgrade['host_id'] == worker.id:
                self.assertEqual(upgrade['control_plane_version'], 'N/A')
                self.assertEqual(upgrade['kubelet_version'], 'v1.42.2')
            else:
                self.assertEqual(upgrade['control_plane_version'], 'v1.42.1')
                self.assertEqual(upgrade['kubelet_version'], 'v1.42.2')
            self.assertEqual(upgrade['host_id'], host_id)
            host_id += 1

    def test_all_no_kubernetes(self):
        # Storage hosts report N/A for both kubernetes versions.
        self._create_controller_0()
        self._create_controller_1()
        worker = self._create_worker(mgmt_ip='192.168.24.12')
        storage = self._create_storage(mgmt_ip='192.168.24.13')
        data = self.get_json('/kube_host_upgrades')
        self.assertEqual(4, len(data['kube_host_upgrades']))
        host_id = 1
        for upgrade in data['kube_host_upgrades']:
            self.assertIn('id', upgrade)
            self.assertTrue(uuidutils.is_uuid_like(upgrade['uuid']))
            self.assertEqual(upgrade['target_version'], None)
            self.assertEqual(upgrade['status'], None)
            if upgrade['host_id'] == storage.id:
                self.assertEqual(upgrade['control_plane_version'], 'N/A')
                self.assertEqual(upgrade['kubelet_version'], 'N/A')
            elif upgrade['host_id'] == worker.id:
                self.assertEqual(upgrade['control_plane_version'], 'N/A')
                self.assertEqual(upgrade['kubelet_version'], 'v1.42.2')
            else:
                self.assertEqual(upgrade['control_plane_version'], 'v1.42.1')
                self.assertEqual(upgrade['kubelet_version'], 'v1.42.2')
            self.assertEqual(upgrade['host_id'], host_id)
            host_id += 1

    def test_host_links(self):
        # Each kube_host_upgrade entry carries self/bookmark links.
        uuid = uuidutils.generate_uuid()
        ndict = dbutils.get_test_ihost(id=1, uuid=uuid,
                                       forisystemid=self.system.id)
        self.dbapi.ihost_create(ndict)
        data = self.get_json('/kube_host_upgrades/1')
        upgrade_uuid = data['uuid']
        # Membership test on the dict directly; .keys() is redundant.
        self.assertIn('links', data)
        self.assertEqual(len(data['links']), 2)
        self.assertIn(upgrade_uuid, data['links'][0]['href'])

    def test_collection_links(self):
        # Paging: a full page includes a 'next' link with the last uuid.
        hosts = []
        for hostid in range(100):
            ndict = dbutils.get_test_ihost(
                id=hostid, hostname=hostid, mgmt_mac=hostid,
                forisystemid=self.system.id,
                mgmt_ip="%s.%s.%s.%s" % (hostid, hostid, hostid, hostid),
                uuid=uuidutils.generate_uuid())
            host = self.dbapi.ihost_create(ndict)
            hosts.append(host['uuid'])
        data = self.get_json('/kube_host_upgrades/?limit=100')
        self.assertEqual(len(data['kube_host_upgrades']), 100)

        next_marker = data['kube_host_upgrades'][-1]['uuid']
        self.assertIn(next_marker, data['next'])
|
|
@ -0,0 +1,464 @@
|
|||
#
|
||||
# Copyright (c) 2019 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
"""
|
||||
Tests for the API /kube_upgrade/ methods.
|
||||
"""
|
||||
|
||||
import mock
|
||||
from six.moves import http_client
|
||||
|
||||
from sysinv.common import kubernetes
|
||||
|
||||
from sysinv.tests.api import base
|
||||
from sysinv.tests.db import base as dbbase
|
||||
from sysinv.tests.db import utils as dbutils
|
||||
|
||||
# Fake version metadata served by the mocked kubernetes.get_kube_versions.
# The chain v1.42.1 -> v1.42.2 -> v1.43.1 -> v1.43.2 (and the dead-end
# v1.43.3) lets the tests exercise both supported and unsupported paths.
FAKE_KUBE_VERSIONS = [
    {'version': 'v1.42.1',
     'upgrade_from': [],
     'downgrade_to': [],
     'applied_patches': [],
     'available_patches': [],
     },
    {'version': 'v1.42.2',
     'upgrade_from': ['v1.42.1'],
     'downgrade_to': [],
     'applied_patches': ['KUBE.1', 'KUBE.2'],
     'available_patches': ['KUBE.3'],
     },
    {'version': 'v1.43.1',
     'upgrade_from': ['v1.42.2'],
     'downgrade_to': [],
     'applied_patches': ['KUBE.11', 'KUBE.12'],
     'available_patches': ['KUBE.13'],
     },
    {'version': 'v1.43.2',
     'upgrade_from': ['v1.43.1', 'v1.42.2'],
     'downgrade_to': ['v1.43.1'],
     'applied_patches': ['KUBE.14', 'KUBE.15'],
     'available_patches': ['KUBE.16'],
     },
    {'version': 'v1.43.3',
     'upgrade_from': ['v1.43.2'],
     'downgrade_to': [],
     'applied_patches': [],
     'available_patches': [],
     },
]
|
||||
|
||||
|
||||
class FakeConductorAPI(object):
    """Stand-in for the conductor RPC API used by the kube_upgrade tests."""

    def __init__(self):
        self.kube_download_images = mock.MagicMock()
        self.kube_upgrade_networking = mock.MagicMock()
        # Canned health report; tests override this to simulate failures.
        self.get_system_health_return = (True, "System is super healthy")

    def get_system_health(self, context, force=False):
        """Return the canned health tuple; force always reports healthy."""
        if force:
            return True, "System is healthy because I was forced to say that"
        return self.get_system_health_return
|
||||
|
||||
|
||||
class TestKubeUpgrade(base.FunctionalTest, dbbase.BaseSystemTestCase):
    """Base test case that mocks everything the kube_upgrade API touches:
    the conductor RPC API, the patching API and the kubernetes operator.
    """

    def setUp(self):
        super(TestKubeUpgrade, self).setUp()

        # Mock the Conductor API
        self.fake_conductor_api = FakeConductorAPI()
        p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI')
        self.mock_conductor_api = p.start()
        self.mock_conductor_api.return_value = self.fake_conductor_api
        self.addCleanup(p.stop)

        # Mock the patching API.
        # Tests flip this flag to simulate required patches not applied.
        self.mock_patch_is_applied_result = True

        def mock_patch_is_applied(token, timeout, region_name, patches):
            return self.mock_patch_is_applied_result
        self.mocked_patch_is_applied = mock.patch(
            'sysinv.api.controllers.v1.patch_api.patch_is_applied',
            mock_patch_is_applied)
        self.mocked_patch_is_applied.start()
        self.addCleanup(self.mocked_patch_is_applied.stop)

        # Tests flip this flag to simulate required patches not available.
        self.mock_patch_is_available_result = True

        def mock_patch_is_available(token, timeout, region_name, patches):
            return self.mock_patch_is_available_result
        self.mocked_patch_is_available = mock.patch(
            'sysinv.api.controllers.v1.patch_api.patch_is_available',
            mock_patch_is_available)
        self.mocked_patch_is_available.start()
        self.addCleanup(self.mocked_patch_is_available.stop)

        # Mock the KubeVersion
        def mock_get_kube_versions():
            return FAKE_KUBE_VERSIONS
        self.mocked_get_kube_versions = mock.patch(
            'sysinv.common.kubernetes.get_kube_versions',
            mock_get_kube_versions)
        self.mocked_get_kube_versions.start()
        self.addCleanup(self.mocked_get_kube_versions.stop)

        # Mock the KubeOperator.
        # Current cluster version reported by the mocked operator.
        self.kube_get_kubernetes_version_result = 'v1.43.1'

        def mock_kube_get_kubernetes_version(obj):
            return self.kube_get_kubernetes_version_result
        self.mocked_kube_get_kubernetes_version = mock.patch(
            'sysinv.common.kubernetes.KubeOperator.kube_get_kubernetes_version',
            mock_kube_get_kubernetes_version)
        self.mocked_kube_get_kubernetes_version.start()
        self.addCleanup(self.mocked_kube_get_kubernetes_version.stop)

        # Per-version states reported by the mocked operator; tests
        # override this dict to exercise state-related failure paths.
        self.kube_get_version_states_result = {'v1.42.1': 'available',
                                               'v1.42.2': 'available',
                                               'v1.43.1': 'active',
                                               'v1.43.2': 'available',
                                               'v1.43.3': 'available'}

        def mock_kube_get_version_states(obj):
            return self.kube_get_version_states_result
        self.mocked_kube_get_version_states = mock.patch(
            'sysinv.common.kubernetes.KubeOperator.kube_get_version_states',
            mock_kube_get_version_states)
        self.mocked_kube_get_version_states.start()
        self.addCleanup(self.mocked_kube_get_version_states.stop)
|
||||
|
||||
|
||||
class TestListKubeUpgrade(TestKubeUpgrade):
    """Read (GET) tests for the kube_upgrade API."""

    def test_one(self):
        """GET by UUID returns the upgrade's attributes."""
        kube_upgrade = dbutils.create_test_kube_upgrade(
            from_version='v1.42.1',
            to_version='v1.42.2',
            state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
        )

        result = self.get_json('/kube_upgrade/%s' % kube_upgrade['uuid'])

        # Verify that the upgrade has the expected attributes
        self.assertEqual('v1.42.1', result['from_version'])
        self.assertEqual('v1.42.2', result['to_version'])
        self.assertEqual(kubernetes.KUBE_UPGRADING_FIRST_MASTER,
                         result['state'])

    def test_all(self):
        """GET collection returns the single in-progress upgrade."""
        dbutils.create_test_kube_upgrade(
            from_version='v1.42.1',
            to_version='v1.42.2',
            state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
        )

        data = self.get_json('/kube_upgrade')
        upgrades = data['kube_upgrades']
        self.assertEqual(1, len(upgrades))
        self.assertEqual('v1.42.1', upgrades[0]['from_version'])
        self.assertEqual('v1.42.2', upgrades[0]['to_version'])
        self.assertEqual(kubernetes.KUBE_UPGRADING_FIRST_MASTER,
                         upgrades[0]['state'])
|
||||
|
||||
|
||||
class TestPostKubeUpgrade(TestKubeUpgrade, dbbase.ControllerHostTestCase):
    """Create (POST) tests for the kube_upgrade API."""

    def _post_upgrade(self, create_dict, expect_errors=False):
        # All POSTs share the same endpoint and user agent.
        return self.post_json('/kube_upgrade', create_dict,
                              headers={'User-Agent': 'sysinv-test'},
                              expect_errors=expect_errors)

    def _assert_bad_request(self, result, message):
        # Common verification for a rejected upgrade creation.
        self.assertEqual('application/json', result.content_type)
        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
        self.assertIn(message, result.json['error_message'])

    def test_create(self):
        # Test successful creation of an upgrade
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
        result = self._post_upgrade(create_dict)

        # The conductor must have been asked to download the new images
        self.fake_conductor_api.kube_download_images.\
            assert_called_with(mock.ANY, 'v1.43.2')

        # The new upgrade reflects the requested versions and state
        self.assertEqual('v1.43.1', result.json['from_version'])
        self.assertEqual('v1.43.2', result.json['to_version'])
        self.assertEqual(kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES,
                         result.json['state'])

        # The host's target version was set to the current (from) version
        kube_host_upgrade = self.dbapi.kube_host_upgrade_get_by_host(
            self.host.id)
        self.assertEqual('v1.43.1', kube_host_upgrade.target_version)

    def test_create_upgrade_exists(self):
        # Creation must fail when an upgrade already exists
        dbutils.create_test_kube_upgrade(
            from_version='v1.42.1',
            to_version='v1.42.2',
            state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
        )
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
        result = self._post_upgrade(create_dict, expect_errors=True)
        self._assert_bad_request(result, "upgrade is already in progress")

    def test_create_target_version_does_not_exist(self):
        # Creation must fail when the target version is unknown
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.45.45')
        result = self._post_upgrade(create_dict, expect_errors=True)
        self._assert_bad_request(result, "version v1.45.45 is not available")

    def test_create_upgrade_path_not_supported(self):
        # Creation must fail when the current version cannot reach the target
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.3')
        result = self._post_upgrade(create_dict, expect_errors=True)
        self._assert_bad_request(result, "version v1.43.1 cannot upgrade to")

    def test_create_current_version_not_active(self):
        # Creation must fail when the current version is not fully active
        self.kube_get_version_states_result = {'v1.42.1': 'available',
                                               'v1.42.2': 'available',
                                               'v1.43.1': 'partial',
                                               'v1.43.2': 'available',
                                               'v1.43.3': 'available'}
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
        result = self._post_upgrade(create_dict, expect_errors=True)
        self._assert_bad_request(result, "version v1.43.1 is not active")

    def test_create_system_unhealthy(self):
        # Creation must fail when the system health check fails
        self.fake_conductor_api.get_system_health_return = (
            False, "System is very very unhealthy")
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
        result = self._post_upgrade(create_dict, expect_errors=True)
        self._assert_bad_request(result, "System is not in a valid state")

    def test_create_system_unhealthy_force(self):
        # A failed health check can be overridden with force
        self.fake_conductor_api.get_system_health_return = (
            False, "System is very very unhealthy")
        create_dict = dbutils.post_get_test_kube_upgrade(
            to_version='v1.43.2')
        create_dict['force'] = True
        result = self._post_upgrade(create_dict)

        # Verify that the upgrade has the expected attributes
        self.assertEqual('v1.43.1', result.json['from_version'])
        self.assertEqual('v1.43.2', result.json['to_version'])
        self.assertEqual(kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES,
                         result.json['state'])

    def test_create_no_patches_required(self):
        # Creation succeeds when the target version needs no patches
        self.mock_patch_is_applied_result = False
        self.mock_patch_is_available_result = False
        self.kube_get_kubernetes_version_result = 'v1.43.2'
        self.kube_get_version_states_result = {'v1.42.1': 'available',
                                               'v1.42.2': 'available',
                                               'v1.43.1': 'available',
                                               'v1.43.2': 'active',
                                               'v1.43.3': 'available'}
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.3')
        result = self._post_upgrade(create_dict)

        # Verify that the upgrade has the expected attributes
        self.assertEqual('v1.43.2', result.json['from_version'])
        self.assertEqual('v1.43.3', result.json['to_version'])
        self.assertEqual(kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES,
                         result.json['state'])

    def test_create_applied_patch_missing(self):
        # Creation must fail when a required patch is not applied
        self.mock_patch_is_applied_result = False
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
        result = self._post_upgrade(create_dict, expect_errors=True)
        self._assert_bad_request(result,
                                 "The following patches must be applied")

    def test_create_available_patch_missing(self):
        # Creation must fail when a required patch is not available
        self.mock_patch_is_available_result = False
        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
        result = self._post_upgrade(create_dict, expect_errors=True)
        self._assert_bad_request(result,
                                 "The following patches must be available")
|
||||
|
||||
|
||||
class TestPatch(TestKubeUpgrade):
    """Update (PATCH) tests for the kube_upgrade API."""

    @staticmethod
    def _state_patch(new_state):
        # JSON-patch document that replaces the upgrade state.
        return [{'path': '/state', 'value': new_state, 'op': 'replace'}]

    def test_update_state(self):
        # A valid state change is persisted and triggers networking upgrade.
        kube_upgrade = dbutils.create_test_kube_upgrade(
            from_version='v1.43.1',
            to_version='v1.43.2',
            state=kubernetes.KUBE_UPGRADED_FIRST_MASTER)
        uuid = kube_upgrade.uuid

        new_state = kubernetes.KUBE_UPGRADING_NETWORKING
        response = self.patch_json('/kube_upgrade',
                                   self._state_patch(new_state),
                                   headers={'User-Agent': 'sysinv-test'})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.OK, response.status_code)
        self.assertEqual('v1.43.1', response.json['from_version'])
        self.assertEqual('v1.43.2', response.json['to_version'])
        self.assertEqual(new_state, response.json['state'])

        # Networking upgrade must have been requested from the conductor
        self.fake_conductor_api.kube_upgrade_networking.\
            assert_called_with(mock.ANY, 'v1.43.2')

        # The stored upgrade reflects the new state
        result = self.get_json('/kube_upgrade/%s' % uuid)
        self.assertEqual('v1.43.1', result['from_version'])
        self.assertEqual('v1.43.2', result['to_version'])
        self.assertEqual(new_state, result['state'])

    def test_update_state_no_upgrade(self):
        # Updating the state fails when no upgrade exists
        new_state = kubernetes.KUBE_UPGRADING_NETWORKING
        result = self.patch_json('/kube_upgrade',
                                 self._state_patch(new_state),
                                 headers={'User-Agent': 'sysinv-test'},
                                 expect_errors=True)

        # Verify the failure
        self.assertEqual('application/json', result.content_type)
        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
        self.assertIn("kubernetes upgrade is not in progress",
                      result.json['error_message'])

    def test_update_state_bad_state(self):
        # Updating to a state the API does not accept is rejected
        dbutils.create_test_kube_upgrade(
            from_version='v1.43.1',
            to_version='v1.43.2',
            state=kubernetes.KUBE_UPGRADED_FIRST_MASTER)

        result = self.patch_json('/kube_upgrade',
                                 self._state_patch(
                                     kubernetes.KUBE_UPGRADE_COMPLETE),
                                 headers={'User-Agent': 'sysinv-test'},
                                 expect_errors=True)

        # Verify the failure
        self.assertEqual('application/json', result.content_type)
        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
        self.assertIn("Invalid state", result.json['error_message'])
|
||||
|
||||
|
||||
class TestDelete(TestKubeUpgrade):
    """Delete (DELETE) tests for the kube_upgrade API."""

    def _delete_upgrade(self, expect_errors=False):
        # All deletes share the same endpoint and user agent.
        return self.delete('/kube_upgrade',
                           headers={'User-Agent': 'sysinv-test'},
                           expect_errors=expect_errors)

    def test_delete(self):
        # A completed upgrade can be deleted
        kube_upgrade = dbutils.create_test_kube_upgrade(
            from_version='v1.43.1',
            to_version='v1.43.2',
            state=kubernetes.KUBE_UPGRADE_COMPLETE)

        self._delete_upgrade()

        # The upgrade must no longer be retrievable
        response = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

    def test_delete_upgrade_not_complete(self):
        # Deletion is rejected while the upgrade is still in progress
        dbutils.create_test_kube_upgrade(
            from_version='v1.43.1',
            to_version='v1.43.2',
            state=kubernetes.KUBE_UPGRADING_FIRST_MASTER)

        result = self._delete_upgrade(expect_errors=True)

        # Verify the failure
        self.assertEqual('application/json', result.content_type)
        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
        self.assertIn("upgrade must be in upgrade-complete",
                      result.json['error_message'])

    def test_delete_no_upgrade(self):
        # Deletion is rejected when no upgrade exists
        result = self._delete_upgrade(expect_errors=True)

        # Verify the failure
        self.assertEqual('application/json', result.content_type)
        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
        self.assertIn("upgrade is not in progress",
                      result.json['error_message'])
|
|
@ -11,7 +11,10 @@ Tests for the API /kube_version/ methods.
|
|||
import mock
|
||||
import webtest.app
|
||||
|
||||
from sysinv.common import kubernetes
|
||||
|
||||
from sysinv.tests.api import base
|
||||
from sysinv.tests.db import utils as dbutils
|
||||
|
||||
FAKE_KUBE_VERSIONS = [
|
||||
{'version': 'v1.42.1',
|
||||
|
@ -40,6 +43,11 @@ FAKE_KUBE_VERSIONS = [
|
|||
},
|
||||
]
|
||||
|
||||
# Per-version states returned by the mocked
# KubeOperator.kube_get_version_states in this test module.
FAKE_KUBE_STATES = {'v1.42.1': 'available',
                    'v1.42.2': 'available',
                    'v1.43.1': 'active',
                    'v1.43.2': 'available'}
|
||||
|
||||
|
||||
def mock_get_kube_versions():
    """Module-level replacement for sysinv.common.kubernetes.get_kube_versions."""
    return FAKE_KUBE_VERSIONS
|
||||
|
@ -51,10 +59,7 @@ class TestKubeVersion(base.FunctionalTest):
|
|||
super(TestKubeVersion, self).setUp()
|
||||
|
||||
def mock_kube_get_version_states(obj):
|
||||
return {'v1.42.1': 'available',
|
||||
'v1.42.2': 'available',
|
||||
'v1.43.1': 'active',
|
||||
'v1.43.2': 'available'}
|
||||
return FAKE_KUBE_STATES
|
||||
self.mocked_kube_get_version_states = mock.patch(
|
||||
'sysinv.common.kubernetes.KubeOperator.kube_get_version_states',
|
||||
mock_kube_get_version_states)
|
||||
|
@ -104,6 +109,30 @@ class TestListKubeVersions(TestKubeVersion):
|
|||
self.assertEqual(result['state'], 'active')
|
||||
self.assertEqual(result['target'], True)
|
||||
|
||||
def test_one_upgrade_target(self):
|
||||
# Create an upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.43.1',
|
||||
to_version='v1.43.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
result = self.get_json('/kube_versions/v1.43.2')
|
||||
|
||||
# Verify that the version has the expected attributes
|
||||
self.assertEqual(result['version'],
|
||||
FAKE_KUBE_VERSIONS[3]['version'])
|
||||
self.assertEqual(result['upgrade_from'],
|
||||
FAKE_KUBE_VERSIONS[3]['upgrade_from'])
|
||||
self.assertEqual(result['downgrade_to'],
|
||||
FAKE_KUBE_VERSIONS[3]['downgrade_to'])
|
||||
self.assertEqual(result['applied_patches'],
|
||||
FAKE_KUBE_VERSIONS[3]['applied_patches'])
|
||||
self.assertEqual(result['available_patches'],
|
||||
FAKE_KUBE_VERSIONS[3]['available_patches'])
|
||||
self.assertEqual(result['state'], 'available')
|
||||
self.assertEqual(result['target'], True)
|
||||
|
||||
def test_bad_version(self):
|
||||
self.assertRaises(webtest.app.AppError, self.get_json,
|
||||
'/kube_versions/v1.42.2.unknown')
|
||||
|
@ -111,3 +140,35 @@ class TestListKubeVersions(TestKubeVersion):
|
|||
def test_all(self):
|
||||
data = self.get_json('/kube_versions')
|
||||
self.assertEqual(len(FAKE_KUBE_VERSIONS), len(data['kube_versions']))
|
||||
|
||||
index = 0
|
||||
for result in data['kube_versions']:
|
||||
self.assertEqual(result['version'],
|
||||
FAKE_KUBE_VERSIONS[index]['version'])
|
||||
self.assertEqual(result['state'],
|
||||
FAKE_KUBE_STATES[result['version']])
|
||||
self.assertEqual(result['target'],
|
||||
True if FAKE_KUBE_STATES[result['version']] ==
|
||||
'active' else False)
|
||||
index += 1
|
||||
|
||||
def test_all_upgrade_target(self):
|
||||
# Create an upgrade
|
||||
dbutils.create_test_kube_upgrade(
|
||||
from_version='v1.43.1',
|
||||
to_version='v1.43.2',
|
||||
state=kubernetes.KUBE_UPGRADE_STARTED,
|
||||
)
|
||||
|
||||
data = self.get_json('/kube_versions')
|
||||
self.assertEqual(len(FAKE_KUBE_VERSIONS), len(data['kube_versions']))
|
||||
|
||||
index = 0
|
||||
for result in data['kube_versions']:
|
||||
self.assertEqual(result['version'],
|
||||
FAKE_KUBE_VERSIONS[index]['version'])
|
||||
self.assertEqual(result['state'],
|
||||
FAKE_KUBE_STATES[result['version']])
|
||||
self.assertEqual(result['target'],
|
||||
True if result['version'] == 'v1.43.2' else False)
|
||||
index += 1
|
||||
|
|
|
@ -374,6 +374,37 @@ class TestKubeOperator(base.TestCase):
|
|||
]
|
||||
)
|
||||
|
||||
self.config_map_result = kubernetes.client.V1ConfigMap(
|
||||
api_version="v1",
|
||||
data={"ClusterConfiguration":
|
||||
"apiServer:\n"
|
||||
" certSANs:\n"
|
||||
" - 127.0.0.1\n"
|
||||
" - 192.168.206.2\n"
|
||||
"apiVersion: kubeadm.k8s.io/v1beta2\n"
|
||||
"kubernetesVersion: v1.42.4\n"
|
||||
"kind: ClusterStatus\n"
|
||||
},
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kubeadm-config",
|
||||
namespace="kube-system"),
|
||||
)
|
||||
|
||||
self.config_map_result_no_version = kubernetes.client.V1ConfigMap(
|
||||
api_version="v1",
|
||||
data={"ClusterConfiguration":
|
||||
"apiServer:\n"
|
||||
" certSANs:\n"
|
||||
" - 127.0.0.1\n"
|
||||
" - 192.168.206.2\n"
|
||||
"apiVersion: kubeadm.k8s.io/v1beta2\n"
|
||||
"kind: ClusterStatus\n"
|
||||
},
|
||||
metadata=kubernetes.client.V1ObjectMeta(
|
||||
name="kubeadm-config",
|
||||
namespace="kube-system"),
|
||||
)
|
||||
|
||||
def setUp(self):
|
||||
super(TestKubeOperator, self).setUp()
|
||||
|
||||
|
@ -398,6 +429,15 @@ class TestKubeOperator(base.TestCase):
|
|||
mock_list_node)
|
||||
self.mocked_list_node.start()
|
||||
|
||||
self.read_namespaced_config_map_result = None
|
||||
|
||||
def mock_read_namespaced_config_map(obj, configmap, namespace):
|
||||
return self.read_namespaced_config_map_result
|
||||
self.mocked_read_namespaced_config_map = mock.patch(
|
||||
'kubernetes.client.CoreV1Api.read_namespaced_config_map',
|
||||
mock_read_namespaced_config_map)
|
||||
self.mocked_read_namespaced_config_map.start()
|
||||
|
||||
self.kube_operator = kube.KubeOperator()
|
||||
|
||||
def tearDown(self):
|
||||
|
@ -545,3 +585,18 @@ class TestKubeOperator(base.TestCase):
|
|||
'v1.42.3': 'available',
|
||||
'v1.42.4': 'available',
|
||||
'v1.43.1': 'available'}
|
||||
|
||||
def test_kube_get_kubernetes_version(self):
|
||||
|
||||
self.read_namespaced_config_map_result = self.config_map_result
|
||||
|
||||
result = self.kube_operator.kube_get_kubernetes_version()
|
||||
assert result == 'v1.42.4'
|
||||
|
||||
def test_kube_get_kubernetes_version_missing_version(self):
|
||||
|
||||
self.read_namespaced_config_map_result = \
|
||||
self.config_map_result_no_version
|
||||
|
||||
result = self.kube_operator.kube_get_kubernetes_version()
|
||||
assert result is None
|
||||
|
|
|
@ -23,39 +23,166 @@
|
|||
"""Test class for Sysinv ManagerService."""
|
||||
|
||||
import mock
|
||||
import uuid
|
||||
|
||||
from sysinv.common import constants
|
||||
from sysinv.common import exception
|
||||
from sysinv.common import kubernetes
|
||||
from sysinv.conductor import manager
|
||||
from sysinv.db import api as dbapi
|
||||
from sysinv.openstack.common import context
|
||||
|
||||
from sysinv.tests.db import base
|
||||
from sysinv.tests.db import utils
|
||||
|
||||
|
||||
class ManagerTestCase(base.DbTestCase):
|
||||
class FakeCephOperator(object):
    """Stand-in for the ceph operator used by the conductor manager tests."""

    def __init__(self, db_api):
        # Store the database API handle that was passed in. The original
        # code assigned the module-level ``dbapi`` import instead of the
        # ``db_api`` parameter, silently ignoring the caller's argument.
        self.dbapi = db_api
|
||||
|
||||
class FakePuppetOperator(object):
    """Stand-in for the puppet operator used by the conductor manager tests."""

    def __init__(self, db_api):
        # Store the database API handle that was passed in. The original
        # code assigned the module-level ``dbapi`` import instead of the
        # ``db_api`` parameter, silently ignoring the caller's argument.
        self.dbapi = db_api
        # Config-update entry points are plain mocks so tests can assert
        # on how the conductor drives puppet.
        self.update_host_config = mock.MagicMock()
        self.update_system_config = mock.MagicMock()
        self.update_secure_system_config = mock.MagicMock()
||||
|
||||
|
||||
class FakePopen(object):
    """Minimal subprocess.Popen double that always reports success."""

    def __init__(self, **kwargs):
        # All constructor arguments are accepted and ignored; the fake
        # simply pretends the command exited cleanly.
        self.returncode = 0

    def communicate(self):
        """Return canned (stdout, stderr) output."""
        return "Fake stdout", "Fake stderr"
|
||||
|
||||
|
||||
class ManagerTestCase(base.DbTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(ManagerTestCase, self).setUp()
|
||||
|
||||
# Set up objects for testing
|
||||
self.service = manager.ConductorManager('test-host', 'test-topic')
|
||||
self.service.dbapi = dbapi.get_instance()
|
||||
self.context = context.get_admin_context()
|
||||
self.dbapi = dbapi.get_instance()
|
||||
self.system = utils.create_test_isystem()
|
||||
self.load = utils.create_test_load()
|
||||
self.dnsmasq_hosts_file = '/tmp/dnsmasq.hosts'
|
||||
|
||||
self.mock_upgrade_downgrade_kube_components = self.upgrade_downgrade_kube_components_patcher.start()
|
||||
# Mock the ceph operator
|
||||
self.fake_ceph_operator = FakeCephOperator(self.dbapi)
|
||||
p = mock.patch('sysinv.conductor.ceph.CephOperator')
|
||||
self.mock_ceph_operator = p.start()
|
||||
self.mock_ceph_operator.return_value = self.fake_ceph_operator
|
||||
self.addCleanup(p.stop)
|
||||
|
||||
def tearDown(self):
|
||||
super(ManagerTestCase, self).tearDown()
|
||||
self.upgrade_downgrade_kube_components_patcher.stop()
|
||||
# Mock the puppet operator
|
||||
self.fakepuppet_operator = FakePuppetOperator(self.dbapi)
|
||||
p = mock.patch('sysinv.puppet.puppet.PuppetOperator')
|
||||
self.mockpuppet_operator = p.start()
|
||||
self.mockpuppet_operator.return_value = self.fakepuppet_operator
|
||||
self.addCleanup(p.stop)
|
||||
self.service._puppet = self.fakepuppet_operator
|
||||
|
||||
# Mock manager methods
|
||||
self.upgrade_downgrade_kube_components_patcher = mock.patch.object(
|
||||
manager.ConductorManager, '_upgrade_downgrade_kube_components')
|
||||
self.mock_upgrade_downgrade_kube_components = \
|
||||
self.upgrade_downgrade_kube_components_patcher.start()
|
||||
self.addCleanup(self.mock_upgrade_downgrade_kube_components.stop)
|
||||
|
||||
self.do_update_alarm_status_patcher = mock.patch.object(
|
||||
manager.ConductorManager, '_do_update_alarm_status')
|
||||
self.mock_do_update_alarm_status = \
|
||||
self.do_update_alarm_status_patcher.start()
|
||||
self.addCleanup(self.mock_do_update_alarm_status.stop)
|
||||
|
||||
self.fail_config_apply_runtime_manifest = False
|
||||
|
||||
def mock_config_apply_runtime_manifest(obj, context, config_uuid,
|
||||
config_dict):
|
||||
if not self.fail_config_apply_runtime_manifest:
|
||||
# Pretend the config was applied
|
||||
for host_uuid in config_dict['host_uuids']:
|
||||
self.dbapi.ihost_update(host_uuid,
|
||||
{'config_applied': config_uuid})
|
||||
self.mocked_config_apply_runtime_manifest = mock.patch.object(
|
||||
manager.ConductorManager, '_config_apply_runtime_manifest',
|
||||
mock_config_apply_runtime_manifest)
|
||||
self.mocked_config_apply_runtime_manifest.start()
|
||||
self.addCleanup(self.mocked_config_apply_runtime_manifest.stop)
|
||||
|
||||
# Mock subprocess popen
|
||||
self.fake_subprocess_popen = FakePopen()
|
||||
p = mock.patch('eventlet.green.subprocess.Popen')
|
||||
self.mock_subprocess_popen = p.start()
|
||||
self.mock_subprocess_popen.return_value = self.fake_subprocess_popen
|
||||
self.addCleanup(p.stop)
|
||||
|
||||
# Mock the KubeOperator
|
||||
self.kube_get_control_plane_versions_result = {
|
||||
'controller-0': 'v1.42.1',
|
||||
'controller-1': 'v1.42.1',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
def mock_kube_get_control_plane_versions(obj):
|
||||
return self.kube_get_control_plane_versions_result
|
||||
self.mocked_kube_get_control_plane_versions = mock.patch(
|
||||
'sysinv.common.kubernetes.KubeOperator.kube_get_control_plane_versions',
|
||||
mock_kube_get_control_plane_versions)
|
||||
self.mocked_kube_get_control_plane_versions.start()
|
||||
self.addCleanup(self.mocked_kube_get_control_plane_versions.stop)
|
||||
|
||||
self.kube_get_kubelet_versions_result = {
|
||||
'controller-0': 'v1.42.1',
|
||||
'controller-1': 'v1.42.1',
|
||||
'worker-0': 'v1.42.1'}
|
||||
|
||||
def mock_kube_get_kubelet_versions(obj):
|
||||
return self.kube_get_kubelet_versions_result
|
||||
self.mocked_kube_get_kubelet_versions = mock.patch(
|
||||
'sysinv.common.kubernetes.KubeOperator.kube_get_kubelet_versions',
|
||||
mock_kube_get_kubelet_versions)
|
||||
self.mocked_kube_get_kubelet_versions.start()
|
||||
self.addCleanup(self.mocked_kube_get_kubelet_versions.stop)
|
||||
|
||||
# Mock the KubeVersion
|
||||
self.get_kube_versions_result = [
|
||||
{'version': 'v1.42.1',
|
||||
'upgrade_from': [],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
{'version': 'v1.42.2',
|
||||
'upgrade_from': ['v1.42.1'],
|
||||
'downgrade_to': [],
|
||||
'applied_patches': [],
|
||||
'available_patches': [],
|
||||
},
|
||||
]
|
||||
|
||||
def mock_get_kube_versions():
|
||||
return self.get_kube_versions_result
|
||||
self.mocked_get_kube_versions = mock.patch(
|
||||
'sysinv.common.kubernetes.get_kube_versions',
|
||||
mock_get_kube_versions)
|
||||
self.mocked_get_kube_versions.start()
|
||||
self.addCleanup(self.mocked_get_kube_versions.stop)
|
||||
|
||||
def _create_test_ihost(self, **kwargs):
    """Create and persist a test ihost associated with the test system."""
    # Tie the host to the test system record so lookups resolve.
    kwargs['forisystemid'] = self.system['id']
    host_values = utils.get_test_ihost(**kwargs)
    # Drop the canned ID so the DB assigns one, unless the caller
    # explicitly supplied an 'id'.
    if 'id' not in kwargs:
        del host_values['id']
    return self.dbapi.ihost_create(host_values)
|
||||
|
||||
|
@ -193,8 +320,6 @@ class ManagerTestCase(base.DbTestCase):
|
|||
self.context,
|
||||
ihost)
|
||||
|
||||
dnsmasq_hosts_file = '/tmp/dnsmasq.hosts'
|
||||
|
||||
def test_configure_ihost_new(self):
|
||||
# Test skipped to prevent error message in Jenkins. Error thrown is:
|
||||
# in test_configure_ihost_new
|
||||
|
@ -279,3 +404,424 @@ class ManagerTestCase(base.DbTestCase):
|
|||
self.service.configure_ihost,
|
||||
self.context,
|
||||
ihost)
|
||||
|
||||
def test_kube_download_images(self):
    """A successful image download updates the upgrade state."""
    # Seed an upgrade that is in the downloading-images phase.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES,
    )

    # Run the image download step on the conductor.
    self.service.kube_download_images(self.context, 'v1.42.2')

    # The upgrade record should now carry the updated state.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state, kubernetes.KUBE_UPGRADE_STARTED)
|
||||
|
||||
def test_kube_download_images_ansible_fail(self):
    """An ansible failure during image download marks the upgrade failed."""
    # NOTE(review): the seeded state here is KUBE_UPGRADING_FIRST_MASTER,
    # while the success test seeds KUBE_UPGRADE_DOWNLOADING_IMAGES —
    # confirm this difference is intentional.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
    )
    # Simulate the ansible playbook exiting non-zero.
    self.fake_subprocess_popen.returncode = 1

    # Attempt the image download.
    self.service.kube_download_images(self.context, 'v1.42.2')

    # The failure must be recorded on the upgrade.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state, kubernetes.KUBE_UPGRADE_FAILED)
|
||||
|
||||
def test_kube_upgrade_control_plane_first_master(self):
    """Control plane upgrade of the first master completes successfully."""
    # Seed an upgrade targeting the first master.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
    )
    # Point controller-0 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})
    # Report controller-0's control plane at the new version so the
    # upgrade is seen to succeed.
    self.kube_get_control_plane_versions_result = {
        'controller-0': 'v1.42.2',
        'controller-1': 'v1.42.1',
        'worker-0': 'v1.42.1'}

    # Shrink the polling intervals so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.POD_START_INTERVAL = 1

    # Run the control plane upgrade.
    self.service.kube_upgrade_control_plane(self.context, controller.uuid)

    # The overall upgrade state should have advanced.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADED_FIRST_MASTER)

    # The per-host upgrade status should have been cleared.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNone(host_upgrade.status)
|
||||
|
||||
def test_kube_upgrade_control_plane_first_master_manifest_timeout(self):
    """A manifest apply timeout fails the first-master control plane upgrade."""
    # Seed an upgrade targeting the first master.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
    )
    # Point controller-0 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})
    # Force the runtime manifest apply to fail.
    self.fail_config_apply_runtime_manifest = True

    # Shrink the polling interval and timeout so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.MANIFEST_APPLY_TIMEOUT = 1

    # Run the control plane upgrade.
    self.service.kube_upgrade_control_plane(self.context, controller.uuid)

    # The overall upgrade should be marked failed.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADE_FAILED)

    # The per-host upgrade status should record the failure.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNotNone(host_upgrade.status)
|
||||
|
||||
def test_kube_upgrade_control_plane_first_master_upgrade_fail(self):
    """A control plane that never reaches the new version fails the upgrade."""
    # Seed an upgrade targeting the first master.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
    )
    # Point controller-0 at the new kubernetes version. The mocked
    # control plane versions are left at the old version, so the
    # upgrade will time out waiting for the new version to appear.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})

    # Shrink the polling intervals and timeout so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.POD_START_INTERVAL = 1
    kubernetes.POD_START_TIMEOUT = 1

    # Run the control plane upgrade.
    self.service.kube_upgrade_control_plane(self.context, controller.uuid)

    # The overall upgrade should be marked failed.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADE_FAILED)

    # The per-host upgrade status should record the failure.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNotNone(host_upgrade.status)
|
||||
|
||||
def test_kube_upgrade_control_plane_second_master(self):
    """Control plane upgrade of the second master completes successfully."""
    # Seed an upgrade targeting the second master.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_SECOND_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
        mgmt_mac='00:11:22:33:44:55',
        mgmt_ip='1.2.3.4',
    )
    # Point controller-0 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})
    # Provision an unlocked/enabled controller-1.
    config_uuid = str(uuid.uuid4())
    second_controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-1',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
        mgmt_mac='00:11:22:33:44:56',
        mgmt_ip='1.2.3.5',
    )
    # Point controller-1 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(2, {'target_version': 'v1.42.2'})
    # Report both control planes at the new version so the upgrade is
    # seen to succeed.
    self.kube_get_control_plane_versions_result = {
        'controller-0': 'v1.42.2',
        'controller-1': 'v1.42.2',
        'worker-0': 'v1.42.1'}

    # Shrink the polling intervals so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.POD_START_INTERVAL = 1

    # Run the control plane upgrade on the second master.
    self.service.kube_upgrade_control_plane(self.context,
                                            second_controller.uuid)

    # The overall upgrade state should have advanced.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADED_SECOND_MASTER)

    # The per-host upgrade status should have been cleared.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNone(host_upgrade.status)
|
||||
|
||||
def test_kube_upgrade_kubelet_controller(self):
    """Kubelet upgrade on one controller succeeds without changing state."""
    # Seed an upgrade that has finished the second master's control plane.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
    )
    # Point controller-0 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})
    # Report controller-0's kubelet at the new version so the upgrade
    # is seen to succeed.
    self.kube_get_kubelet_versions_result = {
        'controller-0': 'v1.42.2',
        'controller-1': 'v1.42.1',
        'worker-0': 'v1.42.1'}

    # Shrink the polling intervals so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.POD_START_INTERVAL = 1

    # Run the kubelet upgrade.
    self.service.kube_upgrade_kubelet(self.context, controller.uuid)

    # Other kubelets remain on the old version, so the overall upgrade
    # state must not have changed.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADED_SECOND_MASTER)

    # The per-host upgrade status should have been cleared.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNone(host_upgrade.status)
|
||||
|
||||
def test_kube_upgrade_kubelet_second_master(self):
    """Upgrading the last kubelet completes the overall upgrade."""
    # Seed an upgrade in the second-master phase.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_SECOND_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
        mgmt_mac='00:11:22:33:44:55',
        mgmt_ip='1.2.3.4',
    )
    # Point controller-0 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})
    # Provision an unlocked/enabled controller-1.
    config_uuid = str(uuid.uuid4())
    second_controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-1',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
        mgmt_mac='00:11:22:33:44:56',
        mgmt_ip='1.2.3.5',
    )
    # Point controller-1 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(2, {'target_version': 'v1.42.2'})
    # Report every kubelet at the new version.
    self.kube_get_kubelet_versions_result = {
        'controller-0': 'v1.42.2',
        'controller-1': 'v1.42.2'}
    # Report every control plane at the new version.
    self.kube_get_control_plane_versions_result = {
        'controller-0': 'v1.42.2',
        'controller-1': 'v1.42.2'}

    # Shrink the polling intervals so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.POD_START_INTERVAL = 1

    # Run the kubelet upgrade on the second master.
    self.service.kube_upgrade_kubelet(self.context, second_controller.uuid)

    # All components are at the new version, so the overall upgrade
    # should now be complete.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADE_COMPLETE)

    # The per-host upgrade status should have been cleared.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNone(host_upgrade.status)
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_manifest_timeout(self):
    """A manifest apply timeout fails the kubelet upgrade."""
    # Seed an in-progress upgrade.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
    )
    # Point controller-0 at the new kubernetes version.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})
    # Force the runtime manifest apply to fail.
    self.fail_config_apply_runtime_manifest = True

    # Shrink the polling interval and timeout so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.MANIFEST_APPLY_TIMEOUT = 1

    # Run the kubelet upgrade.
    self.service.kube_upgrade_kubelet(self.context, controller.uuid)

    # The overall upgrade should be marked failed.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADE_FAILED)

    # The per-host upgrade status should record the failure.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNotNone(host_upgrade.status)
|
||||
|
||||
def test_kube_upgrade_kubelet_controller_upgrade_fail(self):
    """A kubelet that never reaches the new version fails the upgrade."""
    # Seed an in-progress upgrade.
    utils.create_test_kube_upgrade(
        from_version='v1.42.1',
        to_version='v1.42.2',
        state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
    )
    # Provision an unlocked/enabled controller-0.
    config_uuid = str(uuid.uuid4())
    controller = self._create_test_ihost(
        personality=constants.CONTROLLER,
        hostname='controller-0',
        uuid=str(uuid.uuid4()),
        config_status=None,
        config_applied=config_uuid,
        config_target=config_uuid,
        invprovision=constants.PROVISIONED,
        administrative=constants.ADMIN_UNLOCKED,
        operational=constants.OPERATIONAL_ENABLED,
        availability=constants.AVAILABILITY_ONLINE,
    )
    # Point controller-0 at the new kubernetes version. The mocked
    # kubelet versions are left at the old version, so the upgrade
    # will time out waiting for the new version to appear.
    self.dbapi.kube_host_upgrade_update(1, {'target_version': 'v1.42.2'})

    # Shrink the polling intervals and timeout so the test runs quickly.
    kubernetes.MANIFEST_APPLY_INTERVAL = 1
    kubernetes.POD_START_INTERVAL = 1
    kubernetes.POD_START_TIMEOUT = 1

    # Run the kubelet upgrade.
    self.service.kube_upgrade_kubelet(self.context, controller.uuid)

    # The overall upgrade should be marked failed.
    refreshed = self.dbapi.kube_upgrade_get_one()
    self.assertEqual(refreshed.state,
                     kubernetes.KUBE_UPGRADE_FAILED)

    # The per-host upgrade status should record the failure.
    host_upgrade = self.dbapi.kube_host_upgrade_get(1)
    self.assertIsNotNone(host_upgrade.status)
|
||||
|
|
|
@ -132,7 +132,6 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase):
|
|||
def _create_test_common(self):
|
||||
self._create_test_system()
|
||||
self._create_test_load()
|
||||
self._create_test_kube_upgrades()
|
||||
self._create_test_drbd()
|
||||
self._create_test_remotelogging()
|
||||
self._create_test_user()
|
||||
|
@ -179,29 +178,6 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase):
|
|||
self.ptp = dbutils.create_test_ptp(
|
||||
system_id=self.system.id)
|
||||
|
||||
def _create_test_kube_upgrades(self):
    """Exercise creation and update of the kube upgrade tables."""
    # Creating rows in kube_upgrade and kube_host_upgrade must work.
    upgrade_row = dbutils.create_test_kube_upgrade()
    host_row = dbutils.create_test_kube_host_upgrade()

    # The kube_upgrade state column must be updatable.
    previous_state = upgrade_row['state']
    target_state = 'upgrading'
    self.assertNotEqual(previous_state, target_state)

    updated = self.dbapi.kube_upgrade_update(
        upgrade_row['id'], {'state': target_state})
    self.assertEqual(target_state, updated['state'])

    # The kube_host_upgrade status column must be updatable.
    previous_status = host_row['status']
    target_status = target_state
    self.assertNotEqual(previous_status, target_status)

    updated = self.dbapi.kube_host_upgrade_update(
        host_row['uuid'], {'status': target_status})
    self.assertEqual(target_status, updated['status'])
|
||||
|
||||
def _create_test_network(self, name, nettype, subnet, ranges=None):
|
||||
if not ranges:
|
||||
ranges = [(str(subnet[2]), str(subnet[-2]))]
|
||||
|
|
|
@ -389,3 +389,26 @@ class DbNodeTestCase(base.DbTestCase):
|
|||
|
||||
upd = self.dbapi.storage_ceph_update(res['id'], values)
|
||||
self.assertEqual(values['services'], upd['services'])
|
||||
|
||||
def test_kube_upgrades(self):
    """Exercise creation and update of the kube upgrade tables."""
    # Creating rows in kube_upgrade and kube_host_upgrade must work.
    upgrade_row = utils.create_test_kube_upgrade()
    host_row = utils.create_test_kube_host_upgrade()

    # The kube_upgrade state column must be updatable.
    previous_state = upgrade_row['state']
    target_state = 'upgrading'
    self.assertNotEqual(previous_state, target_state)

    updated = self.dbapi.kube_upgrade_update(
        upgrade_row['id'], {'state': target_state})
    self.assertEqual(target_state, updated['state'])

    # The kube_host_upgrade status column must be updatable.
    previous_status = host_row['status']
    target_status = target_state
    self.assertNotEqual(previous_status, target_status)

    updated = self.dbapi.kube_host_upgrade_update(
        host_row['uuid'], {'status': target_status})
    self.assertEqual(target_status, updated['status'])
|
||||
|
|
|
@ -236,13 +236,26 @@ def create_test_load(**kw):
|
|||
return dbapi.load_create(load)
|
||||
|
||||
|
||||
def get_test_kube_upgrade():
|
||||
def post_get_test_kube_upgrade(**kw):
    """Return kube_upgrade data shaped like an API POST body.

    Strips the DB-generated and internal-only fields from the canned
    kube_upgrade record so it can be posted to the REST API.
    """
    upgrade = get_test_kube_upgrade(**kw)
    for field in ('id', 'uuid', 'from_version', 'state',
                  'reserved_1', 'reserved_2', 'reserved_3', 'reserved_4'):
        del upgrade[field]
    return upgrade
|
||||
|
||||
|
||||
def get_test_kube_upgrade(**kw):
|
||||
upgrade = {
|
||||
'id': 1,
|
||||
'uuid': uuidutils.generate_uuid(),
|
||||
"from_version": 1,
|
||||
"to_version": 2,
|
||||
"state": "tbd",
|
||||
'uuid': kw.get('uuid', uuidutils.generate_uuid()),
|
||||
"from_version": kw.get('from_version', 'v1.42.1'),
|
||||
"to_version": kw.get('to_version', 'v1.42.2'),
|
||||
"state": kw.get('state', 'upgrade-started'),
|
||||
"reserved_1": "res1",
|
||||
"reserved_2": "res2",
|
||||
"reserved_3": "res3",
|
||||
|
@ -255,7 +268,7 @@ def get_test_kube_host_upgrade():
|
|||
upgrade = {
|
||||
'id': 1,
|
||||
'uuid': uuidutils.generate_uuid(),
|
||||
"target_version": 2,
|
||||
"target_version": 'v1.42.1',
|
||||
"status": "tbd",
|
||||
"reserved_1": "",
|
||||
"reserved_2": "",
|
||||
|
@ -266,8 +279,8 @@ def get_test_kube_host_upgrade():
|
|||
return upgrade
|
||||
|
||||
|
||||
def create_test_kube_upgrade():
|
||||
upgrade = get_test_kube_upgrade()
|
||||
def create_test_kube_upgrade(**kw):
|
||||
upgrade = get_test_kube_upgrade(**kw)
|
||||
|
||||
# Let DB generate ID and uuid
|
||||
if 'id' in upgrade:
|
||||
|
@ -281,7 +294,7 @@ def create_test_kube_upgrade():
|
|||
|
||||
|
||||
def create_test_kube_host_upgrade():
|
||||
upgrade = get_test_kube_upgrade()
|
||||
upgrade = get_test_kube_host_upgrade()
|
||||
|
||||
# Let DB generate ID, uuid and host_id
|
||||
if 'id' in upgrade:
|
||||
|
|
|
@ -53,18 +53,18 @@ class TestKubeHostUpgradesObject(base.DbTestCase):
|
|||
self.mox.StubOutWithMock(self.dbapi, 'kube_host_upgrade_get')
|
||||
|
||||
first_obj = objects.kube_host_upgrade.from_db_object(self._get_db_data(
|
||||
dict(self.fake_upgrade_data, target_version=1)))
|
||||
dict(self.fake_upgrade_data, target_version='v1.42.1')))
|
||||
second_obj = objects.kube_host_upgrade.from_db_object(self._get_db_data(
|
||||
dict(self.fake_upgrade_data, target_version=2)))
|
||||
dict(self.fake_upgrade_data, target_version='v1.42.2')))
|
||||
|
||||
self.dbapi.kube_host_upgrade_get(uuid).AndReturn(first_obj)
|
||||
self.dbapi.kube_host_upgrade_get(uuid).AndReturn(second_obj)
|
||||
self.mox.ReplayAll()
|
||||
|
||||
n = objects.kube_host_upgrade.get_by_uuid(self.admin_context, uuid)
|
||||
self.assertEqual(n.target_version, 1)
|
||||
self.assertEqual(n.target_version, 'v1.42.1')
|
||||
n.refresh()
|
||||
self.assertEqual(n.target_version, 2)
|
||||
self.assertEqual(n.target_version, 'v1.42.2')
|
||||
self.mox.VerifyAll()
|
||||
|
||||
def test_objectify(self):
|
||||
|
|
|
@ -53,18 +53,18 @@ class TestKubeUpgradesObject(base.DbTestCase):
|
|||
self.mox.StubOutWithMock(self.dbapi, 'kube_upgrade_get')
|
||||
|
||||
first_obj = objects.kube_upgrade.from_db_object(self._get_db_data(
|
||||
dict(self.fake_upgrade_data, to_version=1)))
|
||||
dict(self.fake_upgrade_data, to_version='v1.42.1')))
|
||||
second_obj = objects.kube_upgrade.from_db_object(self._get_db_data(
|
||||
dict(self.fake_upgrade_data, to_version=2)))
|
||||
dict(self.fake_upgrade_data, to_version='v1.42.2')))
|
||||
|
||||
self.dbapi.kube_upgrade_get(uuid).AndReturn(first_obj)
|
||||
self.dbapi.kube_upgrade_get(uuid).AndReturn(second_obj)
|
||||
self.mox.ReplayAll()
|
||||
|
||||
n = objects.kube_upgrade.get_by_uuid(self.admin_context, uuid)
|
||||
self.assertEqual(n.to_version, 1)
|
||||
self.assertEqual(n.to_version, 'v1.42.1')
|
||||
n.refresh()
|
||||
self.assertEqual(n.to_version, 2)
|
||||
self.assertEqual(n.to_version, 'v1.42.2')
|
||||
self.mox.VerifyAll()
|
||||
|
||||
def test_objectify(self):
|
||||
|
|
Loading…
Reference in New Issue