Kubernetes upgrades support (continued)

Continuing the implementation of kubernetes upgrades. Changes
include:
- Pre-pull control plane images on both controllers as part of
  "system kube-upgrade-start". This is necessary because upgrading
  the first control plane with "kubeadm upgrade" creates pods on
  both masters that download the control plane images, and those
  pods cannot pull from authenticated registries, so the images
  must be pre-pulled.
- Add a new "system kube-upgrade-complete" command to set the
  upgrade state to complete (after checking that all the
  required components have been upgraded).
- Add a --force option to the "system kube-host-upgrade" command
  so a kubelet can be upgraded again even if it already reports
  that it is running the new version. This may be needed to
  recover from certain error scenarios (a minimal client-level
  sketch follows this list).
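
  A minimal client-level sketch of the new surface (assuming an
  authenticated cgtsclient handle "cc"; the hostname is illustrative):

    # Re-run the kubelet upgrade on a host that already reports the
    # new version (the new --force path).
    host = cc.ihost.kube_upgrade_kubelet('controller-0', True)

    # Mark the upgrade complete; this sends a JSON patch replacing
    # the /state field with 'upgrade-complete'.
    upgrade = cc.kube_upgrade.update(
        [{'op': 'replace', 'path': '/state', 'value': 'upgrade-complete'}])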

Change-Id: If7d047ed49725d810407b22ccd5eca90a7658189
Depends-On: https://review.opendev.org/#/c/697185
Story: 2006781
Task: 37579
Task: 37581
Signed-off-by: Bart Wensley <barton.wensley@windriver.com>
Author: Bart Wensley, 2019-12-03 14:59:08 -06:00
parent 28b3bd8ba2
commit 35984d5a31
13 changed files with 422 additions and 53 deletions

@@ -103,7 +103,7 @@ class HostTest(test_shell.ShellTest):
self.ihost_manager_kube_upgrade_control_plane_result = \
[ihost(None, FAKE_IHOST, True)]
def mock_ihost_manager_kube_upgrade_control_plane(obj, hostid):
def mock_ihost_manager_kube_upgrade_control_plane(obj, hostid, force):
return self.ihost_manager_kube_upgrade_control_plane_result
self.mocked_ihost_manager_kube_upgrade_control_plane = mock.patch(
'cgtsclient.v1.ihost.ihostManager.kube_upgrade_control_plane',
@@ -112,7 +112,7 @@ class HostTest(test_shell.ShellTest):
self.addCleanup(
self.mocked_ihost_manager_kube_upgrade_control_plane.stop)
def mock_ihost_manager_kube_upgrade_kubelet(obj, hostid):
def mock_ihost_manager_kube_upgrade_kubelet(obj, hostid, force):
return self.ihost_manager_kube_upgrade_kubelet_result
self.mocked_ihost_manager_kube_upgrade_kubelet = mock.patch(

@@ -121,6 +121,30 @@ class KubeUpgradeTest(test_shell.ShellTest):
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.update')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')
def test_kube_upgrade_complete(self, mock_get_endpoint, mock_get_client,
mock_update):
mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
fake_kube_upgrade = {'from_version': 'v1.42.1',
'to_version': 'v1.42.2',
'state': 'upgrade-complete',
'uuid': 'cb737aba-1820-4184-b0dc-9b073822af48',
'created_at': 'fake-created-time',
'updated_at': 'fake-updated-time',
}
mock_update.return_value = KubeUpgrade(None, fake_kube_upgrade, True)
self.make_env()
results = self.shell("kube-upgrade-complete")
self.assertIn(fake_kube_upgrade['from_version'], results)
self.assertIn(fake_kube_upgrade['to_version'], results)
self.assertIn(fake_kube_upgrade['state'], results)
self.assertIn(fake_kube_upgrade['uuid'], results)
self.assertIn(fake_kube_upgrade['created_at'], results)
self.assertIn(fake_kube_upgrade['updated_at'], results)
@mock.patch('cgtsclient.v1.kube_upgrade.KubeUpgradeManager.delete')
@mock.patch('cgtsclient.client._get_ksclient')
@mock.patch('cgtsclient.client._get_endpoint')

@@ -813,14 +813,18 @@ def do_host_upgrade(cc, args):
@utils.arg('component',
metavar='<component>',
choices=['control-plane', 'kubelet'],
help='kubernetes component to upgrade')
help='Kubernetes component to upgrade')
@utils.arg('-f', '--force',
action='store_true',
default=False,
help="Force the kubernetes upgrade operation ")
def do_kube_host_upgrade(cc, args):
"""Perform kubernetes upgrade for a host."""
if args.component == 'control-plane':
host = cc.ihost.kube_upgrade_control_plane(args.hostid)
host = cc.ihost.kube_upgrade_control_plane(args.hostid, args.force)
elif args.component == 'kubelet':
host = cc.ihost.kube_upgrade_kubelet(args.hostid)
host = cc.ihost.kube_upgrade_kubelet(args.hostid, args.force)
else:
raise exc.CommandError('Invalid component value: %s' % args.component)

@@ -122,14 +122,20 @@ class ihostManager(base.Manager):
result = self._json_get(self._path('bulk_export'))
return result
def kube_upgrade_control_plane(self, hostid):
def kube_upgrade_control_plane(self, hostid, force):
post_body = {}
post_body['force'] = force
resp, body = self.api.json_request(
'POST', self._path(hostid) + "/kube_upgrade_control_plane")
'POST', self._path(hostid) + "/kube_upgrade_control_plane",
body=post_body)
return self.resource_class(self, body)
def kube_upgrade_kubelet(self, hostid):
def kube_upgrade_kubelet(self, hostid, force):
post_body = {}
post_body['force'] = force
resp, body = self.api.json_request(
'POST', self._path(hostid) + "/kube_upgrade_kubelet")
'POST', self._path(hostid) + "/kube_upgrade_kubelet",
body=post_body)
return self.resource_class(self, body)
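Seen from the REST API, the effect of the change above is that both host
actions now carry a small JSON body instead of an empty POST; a rough
sketch of the requests (host id illustrative, path prefix per
ihostManager._path):

    # POST .../ihosts/<hostid>/kube_upgrade_control_plane   body: {"force": false}
    # POST .../ihosts/<hostid>/kube_upgrade_kubelet         body: {"force": true}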

@@ -49,9 +49,7 @@ class KubeUpgradeManager(base.Manager):
def delete(self):
"""Delete a kubernetes upgrade."""
res, body = self.api.json_request('DELETE', self._path())
if body:
return self.resource_class(self, body)
return self.api.json_request('DELETE', self._path())
def update(self, patch):
"""Update a kubernetes upgrade."""

@@ -9,6 +9,7 @@ from cgtsclient import exc
# Kubernetes constants
KUBE_UPGRADE_STATE_UPGRADING_NETWORKING = 'upgrading-networking'
KUBE_UPGRADE_STATE_COMPLETE = 'upgrade-complete'
def _print_kube_upgrade_show(obj):
@@ -64,6 +65,23 @@ def do_kube_upgrade_networking(cc, args):
_print_kube_upgrade_show(kube_upgrade)
def do_kube_upgrade_complete(cc, args):
"""Complete a kubernetes upgrade."""
data = dict()
data['state'] = KUBE_UPGRADE_STATE_COMPLETE
patch = []
for (k, v) in data.items():
patch.append({'op': 'replace', 'path': '/' + k, 'value': v})
try:
kube_upgrade = cc.kube_upgrade.update(patch)
except exc.HTTPNotFound:
raise exc.CommandError('Kubernetes upgrade UUID not found')
_print_kube_upgrade_show(kube_upgrade)
def do_kube_upgrade_delete(cc, args):
"""Delete a kubernetes upgrade."""

@@ -6466,8 +6466,8 @@ class HostController(rest.RestController):
hostupdate.ihost_val.update(val)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(Host, six.text_type)
def kube_upgrade_control_plane(self, uuid):
@wsme_pecan.wsexpose(Host, six.text_type, body=six.text_type)
def kube_upgrade_control_plane(self, uuid, body):
"""Upgrade the kubernetes control plane on this host"""
host_obj = objects.host.get_by_uuid(pecan.request.context, uuid)
@@ -6541,10 +6541,11 @@
return Host.convert_with_links(host_obj)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(Host, six.text_type)
def kube_upgrade_kubelet(self, uuid):
@wsme_pecan.wsexpose(Host, six.text_type, body=six.text_type)
def kube_upgrade_kubelet(self, uuid, body):
"""Upgrade the kubernetes kubelet on this host"""
force = body.get('force', False) is True
host_obj = objects.host.get_by_uuid(pecan.request.context, uuid)
# The kubernetes upgrade must have been started
@@ -6605,14 +6606,24 @@
"The kubelets on all controller hosts must be upgraded "
"before upgrading kubelets on worker hosts."))
# Check the existing kubelet version
current_kubelet_version = kubelet_versions.get(host_obj.hostname)
if current_kubelet_version == kube_upgrade_obj.to_version:
raise wsme.exc.ClientSideError(_(
"The kubelet on this host was already upgraded."))
elif current_kubelet_version is None:
raise wsme.exc.ClientSideError(_(
"Unable to determine the version of the kubelet on this host."))
# Check whether this host was already upgraded
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
pecan.request.context, host_obj.id)
if kube_host_upgrade_obj.target_version == kube_upgrade_obj.to_version:
# Check the existing kubelet version
current_kubelet_version = kubelet_versions.get(host_obj.hostname)
if current_kubelet_version == kube_upgrade_obj.to_version:
# If the force option was used, we will redo the upgrade
if force:
LOG.info("Redoing kubernetes upgrade for %s" %
host_obj.hostname)
else:
raise wsme.exc.ClientSideError(_(
"The kubelet on this host was already upgraded."))
elif current_kubelet_version is None:
raise wsme.exc.ClientSideError(_(
"Unable to determine the version of the kubelet on this "
"host."))
# Verify the host is in the correct state
if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX:
@@ -6628,8 +6639,6 @@
# Set the target version if this is a worker host
if host_obj.personality == constants.WORKER:
kube_host_upgrade_obj = objects.kube_host_upgrade.get_by_host_id(
pecan.request.context, host_obj.id)
kube_host_upgrade_obj.target_version = kube_upgrade_obj.to_version
kube_host_upgrade_obj.save()
@@ -6637,9 +6646,10 @@
pecan.request.rpcapi.kube_upgrade_kubelet(pecan.request.context,
host_obj.uuid)
# Update the upgrade state
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_KUBELETS
kube_upgrade_obj.save()
# Update the upgrade state if necessary
if kube_upgrade_obj.state != kubernetes.KUBE_UPGRADING_KUBELETS:
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_KUBELETS
kube_upgrade_obj.save()
LOG.info("Upgrading kubernetes kubelet on host %s" %
host_obj.hostname)
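Note that kube_upgrade_kubelet reads the flag strictly
(force = body.get('force', False) is True), so only a JSON boolean true
takes the redo path. Assuming the body arrives as a decoded dict, as the
API tests further below exercise:

    # Illustrative semantics of the strict check:
    {'force': True}.get('force', False) is True    # -> True
    {'force': 'true'}.get('force', False) is True  # -> False (not a boolean)
    {}.get('force', False) is True                 # -> False (flag omitted)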

@@ -267,14 +267,36 @@ class KubeUpgradeController(rest.RestController):
"networking" %
kubernetes.KUBE_UPGRADED_FIRST_MASTER))
# Update the upgrade state
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_NETWORKING
kube_upgrade_obj.save()
# Tell the conductor to upgrade networking
pecan.request.rpcapi.kube_upgrade_networking(
pecan.request.context, kube_upgrade_obj.to_version)
# Update the upgrade state
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADING_NETWORKING
return KubeUpgrade.convert_with_links(kube_upgrade_obj)
elif updates['state'] == kubernetes.KUBE_UPGRADE_COMPLETE:
# Make sure upgrade is in the correct state to complete
if kube_upgrade_obj.state != \
kubernetes.KUBE_UPGRADING_KUBELETS:
raise wsme.exc.ClientSideError(_(
"Kubernetes upgrade must be in %s state to complete" %
kubernetes.KUBE_UPGRADING_KUBELETS))
# Make sure the target version is active
version_states = self._kube_operator.kube_get_version_states()
if version_states.get(kube_upgrade_obj.to_version, None) != \
kubernetes.KUBE_STATE_ACTIVE:
raise wsme.exc.ClientSideError(_(
"Kubernetes to_version must be active to complete"))
# All is well, mark the upgrade as complete
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_COMPLETE
kube_upgrade_obj.save()
return KubeUpgrade.convert_with_links(kube_upgrade_obj)
else:
raise wsme.exc.ClientSideError(_(
"Invalid state %s supplied" % updates['state']))

@@ -10692,6 +10692,8 @@ class ConductorManager(service.PeriodicService):
LOG.info("executing playbook: %s for version %s" %
(constants.ANSIBLE_KUBE_PUSH_IMAGES_PLAYBOOK, kube_version))
# Execute the playbook to download the images from the external
# registry to registry.local.
proc = subprocess.Popen(
['ansible-playbook', '-e', 'kubernetes_version=%s' % kube_version,
constants.ANSIBLE_KUBE_PUSH_IMAGES_PLAYBOOK],
@@ -10703,13 +10705,50 @@
if proc.returncode:
LOG.warning("ansible-playbook returned an error: %s" %
proc.returncode)
new_state = kubernetes.KUBE_UPGRADE_FAILED
# Update the upgrade state
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_FAILED
kube_upgrade_obj.save()
return
# Update the config for the controller host(s)
personalities = [constants.CONTROLLER]
config_uuid = self._config_update_hosts(context, personalities)
# Apply the runtime manifest to have docker download the images on
# each controller.
config_dict = {
"personalities": personalities,
"classes": 'platform::kubernetes::pre_pull_control_plane_images'
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
# Wait for the manifest(s) to be applied
elapsed = 0
while elapsed < kubernetes.MANIFEST_APPLY_TIMEOUT:
elapsed += kubernetes.MANIFEST_APPLY_INTERVAL
greenthread.sleep(kubernetes.MANIFEST_APPLY_INTERVAL)
controller_hosts = self.dbapi.ihost_get_by_personality(
constants.CONTROLLER)
for host_obj in controller_hosts:
if host_obj.config_target != host_obj.config_applied:
# At least one controller has not been updated yet
LOG.debug("Waiting for config apply on host %s" %
host_obj.hostname)
break
else:
LOG.info("Config was applied for all controller hosts")
break
else:
new_state = kubernetes.KUBE_UPGRADE_STARTED
LOG.warning("Manifest apply failed for a controller host")
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_FAILED
kube_upgrade_obj.save()
return
# Update the upgrade state
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
kube_upgrade_obj.state = new_state
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_STARTED
kube_upgrade_obj.save()
def kube_upgrade_control_plane(self, context, host_uuid):
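The manifest wait added above relies on Python's while/else: the else
branch runs only when the loop exhausts the timeout without hitting
break. A standalone sketch of that timeout pattern (the interval,
timeout and helper are illustrative stand-ins, not sysinv code):

    import time

    def config_applied_everywhere():
        # Hypothetical stand-in for the per-host config_target ==
        # config_applied comparison done in the real loop.
        return False

    INTERVAL, TIMEOUT = 1, 3   # seconds; illustrative values
    elapsed = 0
    while elapsed < TIMEOUT:
        elapsed += INTERVAL
        time.sleep(INTERVAL)
        if config_applied_everywhere():
            break              # success: the else clause below is skipped
    else:
        # Reached only on timeout, mirroring the KUBE_UPGRADE_FAILED
        # handling in the conductor code above.
        print("manifest apply timed out")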
@@ -10887,14 +10926,6 @@
kube_host_upgrade_obj.status = None
kube_host_upgrade_obj.save()
# Check whether the upgrade is complete
version_states = kube_operator.kube_get_version_states()
if version_states.get(target_version, None) == \
kubernetes.KUBE_STATE_ACTIVE:
kube_upgrade_obj = objects.kube_upgrade.get_one(context)
kube_upgrade_obj.state = kubernetes.KUBE_UPGRADE_COMPLETE
kube_upgrade_obj.save()
def kube_upgrade_networking(self, context, kube_version):
"""Upgrade kubernetes networking for this kubernetes version"""

@@ -53,6 +53,8 @@ class KubernetesPuppet(base.BasePuppet):
self._get_dns_service_domain(),
'platform::kubernetes::params::dns_service_ip':
self._get_dns_service_ip(),
'platform::kubernetes::params::upgrade_to_version':
self._get_kubernetes_upgrade_to_version(),
})
return config
@@ -160,6 +162,16 @@
subnet = netaddr.IPNetwork(self._get_cluster_service_subnet())
return str(subnet[CLUSTER_SERVICE_DNS_IP_OFFSET])
def _get_kubernetes_upgrade_to_version(self):
try:
# Get the kubernetes upgrade record
kube_upgrade_obj = self.dbapi.kube_upgrade_get_one()
except exception.NotFound:
# No upgrade is in progress
return None
else:
return kube_upgrade_obj.to_version
def _get_kubernetes_version(self, host):
config = {}

@@ -807,6 +807,90 @@ class TestPostKubeUpgrades(TestHost):
self.assertIn("host does not have a kubelet",
result.json['error_message'])
def test_kube_upgrade_kubelet_controller_0_repeated(self):
# Test upgrading kubernetes kubelet on controller-0 when it was already
# done
# Create controller-0
self._create_controller_0(
invprovision=constants.PROVISIONED,
administrative=constants.ADMIN_LOCKED,
operational=constants.OPERATIONAL_DISABLED,
availability=constants.AVAILABILITY_ONLINE)
# Update the target version
values = {'target_version': 'v1.42.2'}
self.dbapi.kube_host_upgrade_update(1, values)
# Indicate the kubelet is already upgraded
self.kube_get_kubelet_versions_result = {
'controller-0': 'v1.42.2'}
# Create the upgrade
dbutils.create_test_kube_upgrade(
from_version='v1.42.1',
to_version='v1.42.2',
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
)
# Upgrade the kubelet
body = {}
result = self.post_json(
'/ihosts/controller-0/kube_upgrade_kubelet',
body, headers={'User-Agent': 'sysinv-test'},
expect_errors=True)
# Verify the failure
self.assertEqual(result.content_type, 'application/json')
self.assertEqual(http_client.BAD_REQUEST, result.status_int)
self.assertTrue(result.json['error_message'])
self.assertIn("kubelet on this host was already upgraded",
result.json['error_message'])
def test_kube_upgrade_kubelet_controller_0_repeated_force(self):
# Test upgrading kubernetes kubelet on controller-0 when it was already
# done, but allowed because of the force option
# Create controller-0
c0 = self._create_controller_0(
invprovision=constants.PROVISIONED,
administrative=constants.ADMIN_LOCKED,
operational=constants.OPERATIONAL_DISABLED,
availability=constants.AVAILABILITY_ONLINE)
# Update the target version
values = {'target_version': 'v1.42.2'}
self.dbapi.kube_host_upgrade_update(1, values)
# Indicate the kubelet is already upgraded
self.kube_get_kubelet_versions_result = {
'controller-0': 'v1.42.2'}
# Create the upgrade
kube_upgrade = dbutils.create_test_kube_upgrade(
from_version='v1.42.1',
to_version='v1.42.2',
state=kubernetes.KUBE_UPGRADED_SECOND_MASTER,
)
# Upgrade the kubelet
body = {'force': True}
result = self.post_json(
'/ihosts/controller-0/kube_upgrade_kubelet',
body, headers={'User-Agent': 'sysinv-test'})
# Verify the host was returned
self.assertEqual(result.json['hostname'], 'controller-0')
# Verify the kubelet was upgraded
self.fake_conductor_api.kube_upgrade_kubelet.\
assert_called_with(mock.ANY, c0.uuid)
# Verify that the upgrade state was updated
result = self.get_json('/kube_upgrade/%s' % kube_upgrade.uuid)
self.assertEqual(result['state'],
kubernetes.KUBE_UPGRADING_KUBELETS)
def test_kube_upgrade_kubelet_controller_0_wrong_upgrade_state(self):
# Test upgrading kubernetes kubelet on controller-0 with upgrade in
# the wrong state.
@@ -947,6 +1031,10 @@ class TestPostKubeUpgrades(TestHost):
operational=constants.OPERATIONAL_DISABLED,
availability=constants.AVAILABILITY_ONLINE)
# Update the target version
values = {'target_version': 'v1.42.2'}
self.dbapi.kube_host_upgrade_update(1, values)
# Create the upgrade
dbutils.create_test_kube_upgrade(
from_version='v1.42.1',

@@ -331,8 +331,8 @@ class TestPostKubeUpgrade(TestKubeUpgrade, dbbase.ControllerHostTestCase):
class TestPatch(TestKubeUpgrade):
def test_update_state(self):
# Test updating the state of an upgrade
def test_update_state_upgrade_networking(self):
# Test updating the state of an upgrade to upgrade networking
# Create the upgrade
kube_upgrade = dbutils.create_test_kube_upgrade(
@@ -364,6 +364,40 @@ class TestPatch(TestKubeUpgrade):
self.assertEqual(result['to_version'], 'v1.43.2')
self.assertEqual(result['state'], new_state)
def test_update_state_complete(self):
# Test updating the state of an upgrade to complete
self.kube_get_version_states_result = {'v1.42.1': 'available',
'v1.42.2': 'available',
'v1.43.1': 'available',
'v1.43.2': 'active',
'v1.43.3': 'available'}
# Create the upgrade
kube_upgrade = dbutils.create_test_kube_upgrade(
from_version='v1.43.1',
to_version='v1.43.2',
state=kubernetes.KUBE_UPGRADING_KUBELETS)
uuid = kube_upgrade.uuid
# Update state
new_state = kubernetes.KUBE_UPGRADE_COMPLETE
response = self.patch_json('/kube_upgrade',
[{'path': '/state',
'value': new_state,
'op': 'replace'}],
headers={'User-Agent': 'sysinv-test'})
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(response.json['from_version'], 'v1.43.1')
self.assertEqual(response.json['to_version'], 'v1.43.2')
self.assertEqual(response.json['state'], new_state)
# Verify that the upgrade was updated with the new state
result = self.get_json('/kube_upgrade/%s' % uuid)
self.assertEqual(result['from_version'], 'v1.43.1')
self.assertEqual(result['to_version'], 'v1.43.2')
self.assertEqual(result['state'], new_state)
def test_update_state_no_upgrade(self):
# Test updating the state when an upgrade doesn't exist
@@ -392,7 +426,7 @@ class TestPatch(TestKubeUpgrade):
state=kubernetes.KUBE_UPGRADED_FIRST_MASTER)
# Update state
new_state = kubernetes.KUBE_UPGRADE_COMPLETE
new_state = 'this-is-a-bad-state'
result = self.patch_json('/kube_upgrade',
[{'path': '/state',
'value': new_state,

@@ -109,9 +109,17 @@ class ManagerTestCase(base.DbTestCase):
config_dict):
if not self.fail_config_apply_runtime_manifest:
# Pretend the config was applied
for host_uuid in config_dict['host_uuids']:
self.dbapi.ihost_update(host_uuid,
{'config_applied': config_uuid})
if 'host_uuids' in config_dict:
for host_uuid in config_dict['host_uuids']:
self.dbapi.ihost_update(host_uuid,
{'config_applied': config_uuid})
else:
for personality in config_dict['personalities']:
hosts = self.dbapi.ihost_get_by_personality(personality)
for host in hosts:
self.dbapi.ihost_update(
host.uuid, {'config_applied': config_uuid})
self.mocked_config_apply_runtime_manifest = mock.patch.object(
manager.ConductorManager, '_config_apply_runtime_manifest',
mock_config_apply_runtime_manifest)
@@ -437,6 +445,125 @@ class ManagerTestCase(base.DbTestCase):
updated_upgrade = self.dbapi.kube_upgrade_get_one()
self.assertEqual(updated_upgrade.state, kubernetes.KUBE_UPGRADE_FAILED)
def test_kube_download_images_one_controller(self):
# Create an upgrade
utils.create_test_kube_upgrade(
from_version='v1.42.1',
to_version='v1.42.2',
state=kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES,
)
# Create controller-0
config_uuid = str(uuid.uuid4())
self._create_test_ihost(
personality=constants.CONTROLLER,
hostname='controller-0',
uuid=str(uuid.uuid4()),
config_status=None,
config_applied=config_uuid,
config_target=config_uuid,
invprovision=constants.PROVISIONED,
administrative=constants.ADMIN_UNLOCKED,
operational=constants.OPERATIONAL_ENABLED,
availability=constants.AVAILABILITY_ONLINE,
)
# Speed up the test
kubernetes.MANIFEST_APPLY_INTERVAL = 1
kubernetes.MANIFEST_APPLY_TIMEOUT = 1
# Download images
self.service.kube_download_images(self.context, 'v1.42.2')
# Verify that the upgrade state was updated
updated_upgrade = self.dbapi.kube_upgrade_get_one()
self.assertEqual(updated_upgrade.state, kubernetes.KUBE_UPGRADE_STARTED)
def test_kube_download_images_one_controller_manifest_timeout(self):
# Create an upgrade
utils.create_test_kube_upgrade(
from_version='v1.42.1',
to_version='v1.42.2',
state=kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES,
)
# Create controller-0
config_uuid = str(uuid.uuid4())
self._create_test_ihost(
personality=constants.CONTROLLER,
hostname='controller-0',
uuid=str(uuid.uuid4()),
config_status=None,
config_applied=config_uuid,
config_target=config_uuid,
invprovision=constants.PROVISIONED,
administrative=constants.ADMIN_UNLOCKED,
operational=constants.OPERATIONAL_ENABLED,
availability=constants.AVAILABILITY_ONLINE,
)
# Speed up the test
kubernetes.MANIFEST_APPLY_INTERVAL = 1
kubernetes.MANIFEST_APPLY_TIMEOUT = 1
# Make the manifest apply fail
self.fail_config_apply_runtime_manifest = True
# Download images
self.service.kube_download_images(self.context, 'v1.42.2')
# Verify that the upgrade state was updated
updated_upgrade = self.dbapi.kube_upgrade_get_one()
self.assertEqual(updated_upgrade.state,
kubernetes.KUBE_UPGRADE_FAILED)
def test_kube_download_images_two_controllers(self):
# Create an upgrade
utils.create_test_kube_upgrade(
from_version='v1.42.1',
to_version='v1.42.2',
state=kubernetes.KUBE_UPGRADE_DOWNLOADING_IMAGES,
)
# Create controller-0
config_uuid = str(uuid.uuid4())
self._create_test_ihost(
personality=constants.CONTROLLER,
hostname='controller-0',
uuid=str(uuid.uuid4()),
config_status=None,
config_applied=config_uuid,
config_target=config_uuid,
invprovision=constants.PROVISIONED,
administrative=constants.ADMIN_UNLOCKED,
operational=constants.OPERATIONAL_ENABLED,
availability=constants.AVAILABILITY_ONLINE,
)
# Create controller-1
config_uuid = str(uuid.uuid4())
self._create_test_ihost(
personality=constants.CONTROLLER,
hostname='controller-1',
uuid=str(uuid.uuid4()),
config_status=None,
config_applied=config_uuid,
config_target=config_uuid,
invprovision=constants.PROVISIONED,
administrative=constants.ADMIN_UNLOCKED,
operational=constants.OPERATIONAL_ENABLED,
availability=constants.AVAILABILITY_ONLINE,
mgmt_mac='00:11:22:33:44:56',
mgmt_ip='1.2.3.5',
)
# Speed up the test
kubernetes.MANIFEST_APPLY_INTERVAL = 1
kubernetes.MANIFEST_APPLY_TIMEOUT = 1
# Download images
self.service.kube_download_images(self.context, 'v1.42.2')
# Verify that the upgrade state was updated
updated_upgrade = self.dbapi.kube_upgrade_get_one()
self.assertEqual(updated_upgrade.state, kubernetes.KUBE_UPGRADE_STARTED)
def test_kube_upgrade_control_plane_first_master(self):
# Create an upgrade
utils.create_test_kube_upgrade(
@@ -734,11 +861,6 @@
# Upgrade the kubelet
self.service.kube_upgrade_kubelet(self.context, c1.uuid)
# Verify that the upgrade state was updated
updated_upgrade = self.dbapi.kube_upgrade_get_one()
self.assertEqual(updated_upgrade.state,
kubernetes.KUBE_UPGRADE_COMPLETE)
# Verify that the host upgrade status was cleared
updated_host_upgrade = self.dbapi.kube_host_upgrade_get(1)
self.assertEqual(updated_host_upgrade.status, None)