Merge "DC kube upgrade robustness improvements"
@@ -42,6 +42,12 @@ SUPPORTED_STRATEGY_TYPES = [
     consts.SW_UPDATE_TYPE_UPGRADE
 ]
 
+# some strategies allow force for all subclouds
+FORCE_ALL_TYPES = [
+    consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE,
+    consts.SW_UPDATE_TYPE_KUBERNETES
+]
+
 
 class SwUpdateStrategyController(object):
 
@@ -161,8 +167,7 @@ class SwUpdateStrategyController(object):
         if force_flag is not None:
             if force_flag not in ["true", "false"]:
                 pecan.abort(400, _('force invalid'))
-            elif strategy_type != consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
-                # kube rootca update allows force for all subclouds
+            elif strategy_type not in FORCE_ALL_TYPES:
                 if payload.get('cloud_name') is None:
                     pecan.abort(400,
                                 _('The --force option can only be applied '
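Net effect: --force without a named subcloud is now rejected for any strategy type outside FORCE_ALL_TYPES, not only for non-rootca types. A minimal sketch of the rule (the helper name and error text here are illustrative, not from the patch):

def validate_force(strategy_type, force_flag, cloud_name):
    """Return an error string when the force options are invalid, else None."""
    if force_flag is None:
        return None  # --force not supplied; nothing to validate
    if force_flag not in ("true", "false"):
        return 'force invalid'
    if strategy_type not in FORCE_ALL_TYPES and cloud_name is None:
        # only FORCE_ALL_TYPES strategies may force every subcloud at once
        return 'the --force option requires a single subcloud'
    return None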
@@ -146,15 +146,9 @@ STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY = "creating fw update strategy"
 STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY = "applying fw update strategy"
 STRATEGY_STATE_FINISHING_FW_UPDATE = "finishing fw update"
 
-# Kubernetes update orchestration states
-STRATEGY_STATE_KUBE_UPDATING_PATCHES = \
-    "kube updating patches"
-STRATEGY_STATE_KUBE_CREATING_VIM_PATCH_STRATEGY = \
-    "kube creating vim patch strategy"
-STRATEGY_STATE_KUBE_APPLYING_VIM_PATCH_STRATEGY = \
-    "kube applying vim patch strategy"
-STRATEGY_STATE_KUBE_DELETING_VIM_PATCH_STRATEGY = \
-    "kube deleting vim patch strategy"
+# Kubernetes update orchestration states (ordered)
+STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK = \
+    "kube upgrade pre check"
 STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY = \
     "kube creating vim kube upgrade strategy"
 STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY = \

@@ -254,6 +248,8 @@ IMPORTED_LOAD_STATES = [
     IMPORTED_METADATA_LOAD_STATE
 ]
 
+# extra_args for kube upgrade
+EXTRA_ARGS_TO_VERSION = 'to-version'
 # extra_args for kube rootca update
 EXTRA_ARGS_CERT_FILE = 'cert-file'
 EXTRA_ARGS_EXPIRY_DATE = 'expiry-date'
@@ -350,15 +350,54 @@ def get_vault_load_files(target_version):
 
 
 def get_active_kube_version(kube_versions):
-    """Returns the active version name for kubernetes from a list of versions"""
+    """Returns the active (target) kubernetes from a list of versions"""
 
-    active_kube_version = None
+    matching_kube_version = None
     for kube in kube_versions:
         kube_dict = kube.to_dict()
         if kube_dict.get('target') and kube_dict.get('state') == 'active':
-            active_kube_version = kube_dict.get('version')
+            matching_kube_version = kube_dict.get('version')
            break
-    return active_kube_version
+    return matching_kube_version
 
 
+def get_available_kube_version(kube_versions):
+    """Returns first available kubernetes version from a list of versions"""
+
+    matching_kube_version = None
+    for kube in kube_versions:
+        kube_dict = kube.to_dict()
+        if kube_dict.get('state') == 'available':
+            matching_kube_version = kube_dict.get('version')
+            break
+    return matching_kube_version
+
+
+def kube_version_compare(left, right):
+    """Performs a cmp operation for two kubernetes versions
+
+    Return -1, 0, or 1 if left is less, equal, or greater than right
+
+    left and right are semver strings starting with the letter 'v'
+    If either value is None, an exception is raised
+    If the strings are not 'v'major.minor.micro, an exception is raised
+    Note: This method supports shorter versions. ex: v1.22
+    When comparing different length tuples, additional fields are ignored.
+    For example: v1.19 and v1.19.1 would be the same.
+    """
+    if left is None or right is None or left[0] != 'v' or right[0] != 'v':
+        raise Exception("Invalid kube version(s), left: (%s), right: (%s)" %
+                        (left, right))
+    # start the split at index 1 ('after' the 'v' character)
+    l_val = tuple(map(int, (left[1:].split("."))))
+    r_val = tuple(map(int, (right[1:].split("."))))
+    # If the tuples are different length, convert both to the same length
+    min_tuple = min(len(l_val), len(r_val))
+    l_val = l_val[0:min_tuple]
+    r_val = r_val[0:min_tuple]
+    # The following is the same as cmp. Verified in python2 and python3
+    # cmp does not exist in python3.
+    return (l_val > r_val) - (l_val < r_val)
+
+
 def get_loads_for_patching(loads):
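Usage sketch for the comparison helper added above (results follow its docstring; the import path is the dcmanager.common.utils module being patched):

from dcmanager.common import utils

# fields beyond the shorter version are ignored
assert utils.kube_version_compare('v1.19', 'v1.19.1') == 0
# left older than right
assert utils.kube_version_compare('v1.18.1', 'v1.19.13') == -1
# left newer than right
assert utils.kube_version_compare('v1.20', 'v1.19.13') == 1
# None, or strings not starting with 'v', raise an Exception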
@@ -19,30 +19,19 @@ from dcmanager.common import consts
 from dcmanager.orchestrator.orch_thread import OrchThread
 from dcmanager.orchestrator.states.kube.applying_vim_kube_upgrade_strategy \
     import ApplyingVIMKubeUpgradeStrategyState
-from dcmanager.orchestrator.states.kube.applying_vim_patch_strategy \
-    import ApplyingVIMPatchStrategyState
 from dcmanager.orchestrator.states.kube.creating_vim_kube_upgrade_strategy \
     import CreatingVIMKubeUpgradeStrategyState
-from dcmanager.orchestrator.states.kube.creating_vim_patch_strategy \
-    import CreatingVIMPatchStrategyState
-from dcmanager.orchestrator.states.kube.deleting_vim_patch_strategy \
-    import DeletingVIMPatchStrategyState
-from dcmanager.orchestrator.states.kube.updating_kube_patches \
-    import UpdatingKubePatchesState
+from dcmanager.orchestrator.states.kube.pre_check \
+    import KubeUpgradePreCheckState
 
 
 class KubeUpgradeOrchThread(OrchThread):
     """Kube Upgrade Orchestration Thread"""
     # every state in kube orchestration must have an operator
+    # The states are listed here in their typical execution order
     STATE_OPERATORS = {
-        consts.STRATEGY_STATE_KUBE_UPDATING_PATCHES:
-            UpdatingKubePatchesState,
-        consts.STRATEGY_STATE_KUBE_CREATING_VIM_PATCH_STRATEGY:
-            CreatingVIMPatchStrategyState,
-        consts.STRATEGY_STATE_KUBE_APPLYING_VIM_PATCH_STRATEGY:
-            ApplyingVIMPatchStrategyState,
-        consts.STRATEGY_STATE_KUBE_DELETING_VIM_PATCH_STRATEGY:
-            DeletingVIMPatchStrategyState,
+        consts.STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK:
+            KubeUpgradePreCheckState,
         consts.STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY:
             CreatingVIMKubeUpgradeStrategyState,
         consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY:

@@ -55,7 +44,7 @@ class KubeUpgradeOrchThread(OrchThread):
             audit_rpc_client,
             consts.SW_UPDATE_TYPE_KUBERNETES,
             vim.STRATEGY_NAME_KUBE_UPGRADE,
-            consts.STRATEGY_STATE_KUBE_UPDATING_PATCHES)
+            consts.STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK)
 
     def trigger_audit(self):
         """Trigger an audit for kubernetes"""
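For orientation, a simplified sketch of how an orchestration thread drives these operators (an assumption about the surrounding loop; the real OrchThread also handles threading, auditing, and DB updates):

# Resolve and run the operator for one strategy step (simplified sketch).
state_cls = KubeUpgradeOrchThread.STATE_OPERATORS[strategy_step.state]
operator = state_cls(region_name=region)  # the subcloud's region
next_state = operator.perform_state_action(strategy_step)
# perform_state_action returns the next state on success; any exception
# it raises moves the strategy step to the failed state.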
distributedcloud/dcmanager/orchestrator/states/kube/applying_vim_patch_strategy.py (deleted file)
@@ -1,19 +0,0 @@
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from dccommon.drivers.openstack import vim
-from dcmanager.common import consts
-from dcmanager.orchestrator.states.applying_vim_strategy \
-    import ApplyingVIMStrategyState
-
-
-class ApplyingVIMPatchStrategyState(ApplyingVIMStrategyState):
-    """State for applying the VIM patch strategy during kube upgrade."""
-
-    def __init__(self, region_name):
-        super(ApplyingVIMPatchStrategyState, self).__init__(
-            next_state=consts.STRATEGY_STATE_KUBE_DELETING_VIM_PATCH_STRATEGY,
-            region_name=region_name,
-            strategy_name=vim.STRATEGY_NAME_SW_PATCH)
@@ -21,23 +21,25 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState):
             region_name=region_name,
             strategy_name=vim.STRATEGY_NAME_KUBE_UPGRADE)
 
-    def get_target_kube_version(self, strategy_step):
-        kube_versions = self.get_sysinv_client(
-            consts.DEFAULT_REGION_NAME).get_kube_versions()
-        active_kube_version = dcmanager_utils.get_active_kube_version(
-            kube_versions)
-        if active_kube_version is None:
-            message = "Active kube version in RegionOne not found"
-            self.warn_log(strategy_step, message)
-            raise Exception(message)
-        return active_kube_version
-
     def _create_vim_strategy(self, strategy_step, region):
         self.info_log(strategy_step,
                       "Creating (%s) VIM strategy" % self.strategy_name)
+        target_kube_version = None
 
-        # determine the target for the vim kube strategy
-        active_kube_version = self.get_target_kube_version(strategy_step)
+        # If there is an existing kube upgrade object, its to_version is used
+        # This is to allow resume for a kube upgrade
+        subcloud_kube_upgrades = \
+            self.get_sysinv_client(region).get_kube_upgrades()
+        if len(subcloud_kube_upgrades) > 0:
+            target_kube_version = subcloud_kube_upgrades[0].to_version
+        else:
+            # Creating a new kube upgrade, rather than resuming.
+            # Subcloud can only be upgraded to its available version
+            # Pre-Check does rejection logic.
+            kube_versions = \
+                self.get_sysinv_client(region).get_kube_versions()
+            target_kube_version = \
+                dcmanager_utils.get_available_kube_version(kube_versions)
 
         # Get the update options
         opts_dict = dcmanager_utils.get_sw_update_opts(

@@ -53,7 +55,7 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState):
             opts_dict['max-parallel-workers'],
             opts_dict['default-instance-action'],
             opts_dict['alarm-restriction-type'],
-            to_version=active_kube_version)
+            to_version=target_kube_version)
 
         # a successful API call to create MUST set the state be 'building'
         if subcloud_strategy.state != vim.STATE_BUILDING:
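The resume behavior above reduces to this selection rule (a condensed sketch; the sysinv client calls and the helper are the ones used in the patch):

def pick_target_kube_version(self, region):
    # resume: reuse the to_version of a kube upgrade already in flight
    upgrades = self.get_sysinv_client(region).get_kube_upgrades()
    if upgrades:
        return upgrades[0].to_version
    # fresh upgrade: first 'available' version in the subcloud; the
    # pre-check state already rejected subclouds with nothing applicable
    kube_versions = self.get_sysinv_client(region).get_kube_versions()
    return dcmanager_utils.get_available_kube_version(kube_versions)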
distributedcloud/dcmanager/orchestrator/states/kube/creating_vim_patch_strategy.py (deleted file)
@@ -1,46 +0,0 @@
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from dccommon.drivers.openstack import vim
-from dcmanager.common import consts
-from dcmanager.orchestrator.states.creating_vim_strategy \
-    import CreatingVIMStrategyState
-
-
-class CreatingVIMPatchStrategyState(CreatingVIMStrategyState):
-    """State for creating the VIM patch strategy prior to kube upgrade."""
-
-    def __init__(self, region_name):
-        next_state = consts.STRATEGY_STATE_KUBE_APPLYING_VIM_PATCH_STRATEGY
-        super(CreatingVIMPatchStrategyState, self).__init__(
-            next_state=next_state,
-            region_name=region_name,
-            strategy_name=vim.STRATEGY_NAME_SW_PATCH)
-        self.SKIP_REASON = "no software patches need to be applied"
-        self.SKIP_STATE = \
-            consts.STRATEGY_STATE_KUBE_DELETING_VIM_PATCH_STRATEGY
-
-    def skip_check(self, strategy_step, subcloud_strategy):
-        """Check if the vim strategy does not need to be built.
-
-        If the vim_strategy that was constructed returns a failure, and
-        the reason for the failure is expected, the state machine can skip
-        past this vim strategy create/apply and simply delete and move on.
-
-        That happens when the subcloud is already considered up-to-date for
-        its patches based on what the vim calculates for the applies patches
-
-        This method will skip if "no software patches need to be applied'
-        """
-        if subcloud_strategy is not None:
-            if subcloud_strategy.state == vim.STATE_BUILD_FAILED:
-                if subcloud_strategy.build_phase.reason == self.SKIP_REASON:
-                    self.info_log(strategy_step,
-                                  "Skip forward in state machine due to:(%s)"
-                                  % subcloud_strategy.build_phase.reason)
-                    return self.SKIP_STATE
-        # If we get here, there is not a reason to skip
-        return None
distributedcloud/dcmanager/orchestrator/states/kube/deleting_vim_patch_strategy.py (deleted file)
@@ -1,57 +0,0 @@
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from dccommon.drivers.openstack import vim
-from dcmanager.common import consts
-from dcmanager.common.exceptions import KubeUpgradeFailedException
-from dcmanager.orchestrator.states.base import BaseState
-
-
-class DeletingVIMPatchStrategyState(BaseState):
-    """State to delete vim patch strategy before creating vim kube strategy"""
-
-    def __init__(self, region_name):
-        next_state = \
-            consts.STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
-        super(DeletingVIMPatchStrategyState, self).__init__(
-            next_state=next_state,
-            region_name=region_name)
-
-    def perform_state_action(self, strategy_step):
-        """Delete the VIM patch strategy if it exists.
-
-        Returns the next state in the state machine on success.
-        Any exceptions raised by this method set the strategy to FAILED.
-        """
-
-        self.info_log(strategy_step, "Delete vim patch strategy if it exists")
-        region = self.get_region_name(strategy_step)
-        strategy_name = vim.STRATEGY_NAME_SW_PATCH
-
-        vim_strategy = self.get_vim_client(region).get_strategy(
-            strategy_name=strategy_name,
-            raise_error_if_missing=False)
-
-        # If the vim patch strategy does not exist, there is nothing to delete
-        if vim_strategy is None:
-            self.info_log(strategy_step, "Skip. No vim patch strategy exists")
-        else:
-            self.info_log(strategy_step, "Deleting vim patch strategy")
-            # The vim patch strategy cannot be deleted in certain states
-            if vim_strategy.state in [vim.STATE_BUILDING,
-                                      vim.STATE_APPLYING,
-                                      vim.STATE_ABORTING]:
-                # Can't delete a strategy in these states
-                message = ("VIM patch strategy in wrong state:(%s) to delete"
-                           % vim_strategy.state)
-                raise KubeUpgradeFailedException(
-                    subcloud=self.region_name,
-                    details=message)
-            # delete the vim patch strategy
-            self.get_vim_client(region).delete_strategy(
-                strategy_name=strategy_name)
-
-        # Success
-        return self.next_state
distributedcloud/dcmanager/orchestrator/states/kube/pre_check.py (new file, 120 lines)
@@ -0,0 +1,120 @@
+#
+# Copyright (c) 2021 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+from dcmanager.common.consts import DEFAULT_REGION_NAME
+from dcmanager.common.consts import STRATEGY_STATE_COMPLETE
+from dcmanager.common.consts \
+    import STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
+from dcmanager.common import utils
+from dcmanager.orchestrator.states.base import BaseState
+
+
+class KubeUpgradePreCheckState(BaseState):
+    """Perform pre check operations to determine if kube upgrade is required"""
+
+    def __init__(self, region_name):
+        super(KubeUpgradePreCheckState, self).__init__(
+            next_state=STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
+            region_name=region_name)
+
+    def perform_state_action(self, strategy_step):
+        """This state will determine the starting state for kube upgrade
+
+        A subcloud will be out-of-sync if its version does not match the
+        system controller version, however it may be a higher version.
+
+        Subclouds at a higher version than the to-version will be skipped.
+
+        If the strategy contains the extra_args: 'to-version',
+        the subcloud can be upgraded if the 'available' version is
+        less than or equal to that version.
+
+        If a subcloud has an upgrade in progress, its to-version is compared
+        rather than the 'available' version in the subcloud. This allows
+        a partially upgraded subcloud to be skipped.
+        """
+        # Get any existing kubernetes upgrade operation in the subcloud,
+        # and use its to-version rather than the 'available' version for
+        # determining whether or not to skip.
+        subcloud_kube_upgrades = \
+            self.get_sysinv_client(self.region_name).get_kube_upgrades()
+        if len(subcloud_kube_upgrades) > 0:
+            target_version = subcloud_kube_upgrades[0].to_version
+            self.debug_log(strategy_step,
+                           "Pre-Check. Existing Kubernetes upgrade:(%s) exists"
+                           % target_version)
+        else:
+            # The subcloud can only be upgraded to an 'available' version
+            subcloud_kube_versions = \
+                self.get_sysinv_client(self.region_name).get_kube_versions()
+            target_version = \
+                utils.get_available_kube_version(subcloud_kube_versions)
+            self.debug_log(strategy_step,
+                           "Pre-Check. Available Kubernetes upgrade:(%s)"
+                           % target_version)
+
+        # check extra_args for the strategy
+        # if there is a to-version, use that when checking against the subcloud
+        # target version, otherwise compare to the system controller version
+        # to determine if this subcloud is permitted to upgrade.
+        extra_args = utils.get_sw_update_strategy_extra_args(self.context)
+        if extra_args is None:
+            extra_args = {}
+        to_version = extra_args.get('to-version', None)
+        if to_version is None:
+            sys_kube_versions = \
+                self.get_sysinv_client(DEFAULT_REGION_NAME).get_kube_versions()
+            to_version = utils.get_active_kube_version(sys_kube_versions)
+            if to_version is None:
+                # No active target kube version on the system controller means
+                # the system controller is part-way through a kube upgrade
+                message = "System Controller has no active target kube version"
+                self.warn_log(strategy_step, message)
+                raise Exception(message)
+
+        # For the to-version, the code currently allows a partial version
+        # ie: v1.20 or a version that is much higher than is installed.
+        # This allows flexibility when passing in a to-version.
+
+        # The 'to-version' is the desired version to upgrade the subcloud.
+        # The 'target_version' is what the subcloud is allowed to upgrade to.
+        # If the 'target_version' is already greater than the 'to-version',
+        # we want to skip this subcloud.
+        #
+        # Example: subcloud 'target_version' is 1.20.9, to-version is 1.19.13,
+        # so the upgrade should be skipped.
+        #
+        # Example2: subcloud 'target_version' is 1.19.13, to-version is 1.20.9,
+        # so the upgrade should be invoked, but will only move to 1.19.13.
+        # Another upgrade would be needed for the versions to match.
+        #
+        # Example3: subcloud 'target_version' is None. The upgrade is skipped.
+        # The subcloud is already upgraded as far as it can go.
+
+        should_skip = False
+        if target_version is None:
+            should_skip = True
+        else:
+            # -1 if target_version is less. 0 means equal. 1 means greater
+            # Should skip if the target_version is already greater
+            if 1 == utils.kube_version_compare(target_version, to_version):
+                should_skip = True
+
+        # the default next state is to create the vim strategy
+        # if there is no need to upgrade, short circuit to complete.
+        if should_skip:
+            # Add a log indicating we are skipping (and why)
+            self.override_next_state(STRATEGY_STATE_COMPLETE)
+            self.info_log(strategy_step,
+                          "Pre-Check Skip. Orchestration To-Version:(%s). "
+                          "Subcloud To-Version:(%s)"
+                          % (to_version, target_version))
+        else:
+            # Add a log indicating what we expect the next state to 'target'
+            self.info_log(strategy_step,
+                          "Pre-Check Pass. Orchestration To-Version:(%s). "
+                          "Subcloud To-Version:(%s)"
+                          % (to_version, target_version))
+        return self.next_state
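Condensed, the pre-check skip rule is: skip when the subcloud has nothing applicable, or when what it can reach is already beyond the orchestrated goal. A minimal sketch (the function name is illustrative):

from dcmanager.common import utils

def should_skip(target_version, to_version):
    # target_version: what the subcloud can move to (or is moving to)
    # to_version: the orchestrated goal
    if target_version is None:
        return True  # already as far along as it can go
    # kube_version_compare returns 1 when target_version > to_version
    return utils.kube_version_compare(target_version, to_version) == 1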
distributedcloud/dcmanager/orchestrator/states/kube/updating_kube_patches.py (deleted file)
@@ -1,186 +0,0 @@
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-import os
-import time
-
-from dccommon.drivers.openstack import patching_v1
-from dcmanager.common import consts
-from dcmanager.common.exceptions import StrategyStoppedException
-from dcmanager.common import utils
-from dcmanager.orchestrator.states.base import BaseState
-
-# Max time: 30 minutes = 180 queries x 10 seconds between
-DEFAULT_MAX_QUERIES = 180
-DEFAULT_SLEEP_DURATION = 10
-
-
-class UpdatingKubePatchesState(BaseState):
-    """Kube upgrade state for updating patches"""
-
-    def __init__(self, region_name):
-        super(UpdatingKubePatchesState, self).__init__(
-            next_state=consts.STRATEGY_STATE_KUBE_CREATING_VIM_PATCH_STRATEGY,
-            region_name=region_name)
-        # max time to wait (in seconds) is: sleep_duration * max_queries
-        self.sleep_duration = DEFAULT_SLEEP_DURATION
-        self.max_queries = DEFAULT_MAX_QUERIES
-
-    def perform_state_action(self, strategy_step):
-        """Update patches in this subcloud required for kubernetes upgrade.
-
-        Returns the next state in the state machine on success.
-        Any exceptions raised by this method set the strategy to FAILED.
-        """
-
-        self.info_log(strategy_step, "Updating kube patches")
-        region = self.get_region_name(strategy_step)
-
-        # query RegionOne patches
-        regionone_patches = self.get_patching_client(
-            consts.DEFAULT_REGION_NAME).query()
-
-        # Query RegionOne loads to filter the patches
-        loads = self.get_sysinv_client(consts.DEFAULT_REGION_NAME).get_loads()
-        # this filters by active and imported loads
-        installed_loads = utils.get_loads_for_patching(loads)
-
-        # Query RegionOne active kube version to examine the patches
-        kube_versions = self.get_sysinv_client(
-            consts.DEFAULT_REGION_NAME).get_kube_versions()
-        active_kube_version = utils.get_active_kube_version(kube_versions)
-        if active_kube_version is None:
-            message = "Active kube version in RegionOne not found"
-            self.warn_log(strategy_step, message)
-            raise Exception(message)
-
-        kube_ver = self.get_sysinv_client(
-            consts.DEFAULT_REGION_NAME).get_kube_version(active_kube_version)
-        kube_details = kube_ver.to_dict()
-
-        # filter the active patches
-        filtered_region_one_patches = list()
-        applyable_region_one_patches = list()
-        for patch_id in regionone_patches.keys():
-            # Only the patches for the installed loads will be examined
-            if regionone_patches[patch_id]['sw_version'] in installed_loads:
-                # Only care about applied/committed patches
-                if regionone_patches[patch_id]['repostate'] in [
-                        patching_v1.PATCH_STATE_APPLIED,
-                        patching_v1.PATCH_STATE_COMMITTED]:
-                    filtered_region_one_patches.append(patch_id)
-                    # "available_patches" should not be applied
-                    if patch_id not in kube_details.get("available_patches"):
-                        applyable_region_one_patches.append(patch_id)
-
-        # Retrieve all the patches that are present in this subcloud.
-        subcloud_patches = self.get_patching_client(region).query()
-
-        # Not all applied patches can be applied in the subcloud
-        # kube patch orchestration requires the vim strategy to apply some
-        # No patches are being removed at this time.
-        patches_to_upload = list()
-        patches_to_apply = list()
-
-        subcloud_patch_ids = list(subcloud_patches.keys())
-        for patch_id in subcloud_patch_ids:
-            if subcloud_patches[patch_id]['repostate'] == \
-                    patching_v1.PATCH_STATE_APPLIED:
-                # todo(abailey): determine if we want to support remove
-                pass
-            elif subcloud_patches[patch_id]['repostate'] == \
-                    patching_v1.PATCH_STATE_COMMITTED:
-                # todo(abailey): determine if mismatch committed subcloud
-                # patches should cause failure
-                pass
-            elif subcloud_patches[patch_id]['repostate'] == \
-                    patching_v1.PATCH_STATE_AVAILABLE:
-                # No need to upload. May need to apply
-                if patch_id in applyable_region_one_patches:
-                    self.info_log(strategy_step,
-                                  "Patch %s will be applied" % patch_id)
-                    patches_to_apply.append(patch_id)
-            else:
-                # This patch is in an invalid state
-                message = ('Patch %s in subcloud in unexpected state %s' %
-                           (patch_id, subcloud_patches[patch_id]['repostate']))
-                self.warn_log(strategy_step, message)
-                raise Exception(message)
-
-        # Check that all uploaded patches in RegionOne are in subcloud
-        for patch_id in filtered_region_one_patches:
-            if patch_id not in subcloud_patch_ids:
-                patches_to_upload.append(patch_id)
-
-        # Check that all applyable patches in RegionOne are in subcloud
-        for patch_id in applyable_region_one_patches:
-            if patch_id not in subcloud_patch_ids:
-                patches_to_apply.append(patch_id)
-
-        if patches_to_upload:
-            self.info_log(strategy_step,
-                          "Uploading patches %s to subcloud"
-                          % patches_to_upload)
-            for patch in patches_to_upload:
-                patch_sw_version = regionone_patches[patch]['sw_version']
-                patch_file = "%s/%s/%s.patch" % (consts.PATCH_VAULT_DIR,
-                                                 patch_sw_version,
-                                                 patch)
-                if not os.path.isfile(patch_file):
-                    message = ('Patch file %s is missing' % patch_file)
-                    self.error_log(strategy_step, message)
-                    raise Exception(message)
-
-                self.get_patching_client(region).upload([patch_file])
-
-                if self.stopped():
-                    self.info_log(strategy_step,
-                                  "Exiting because task is stopped")
-                    raise StrategyStoppedException()
-
-        if patches_to_apply:
-            self.info_log(strategy_step,
-                          "Applying patches %s to subcloud"
-                          % patches_to_apply)
-            self.get_patching_client(region).apply(patches_to_apply)
-
-        # Now that we have applied/uploaded patches, we need to give
-        # the patch controller on this subcloud time to determine whether
-        # each host on that subcloud is patch current.
-        wait_count = 0
-        while True:
-            subcloud_hosts = self.get_patching_client(region).query_hosts()
-            self.debug_log(strategy_step,
-                           "query_hosts for subcloud returned %s"
-                           % subcloud_hosts)
-            for host in subcloud_hosts:
-                if host['interim_state']:
-                    # This host is not yet ready.
-                    self.debug_log(strategy_step,
-                                   "Host %s in subcloud in interim state"
-                                   % host["hostname"])
-                    break
-            else:
-                # All hosts in the subcloud are updated
-                break
-            wait_count += 1
-            if wait_count >= 6:
-                # We have waited at least 60 seconds. This is too long. We
-                # will just log it and move on without failing the step.
-                message = ("Too much time expired after applying patches to "
-                           "subcloud - continuing.")
-                self.warn_log(strategy_step, message)
-                break
-
-            if self.stopped():
-                self.info_log(strategy_step, "Exiting because task is stopped")
-                raise StrategyStoppedException()
-
-            # Wait 10 seconds before doing another query.
-            time.sleep(10)
-
-        return self.next_state
@@ -125,10 +125,17 @@ class SwUpdateManager(manager.Manager):
                     subcloud_status.sync_status ==
                     consts.SYNC_STATUS_OUT_OF_SYNC)
         elif strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES:
-            return (subcloud_status.endpoint_type ==
-                    dcorch_consts.ENDPOINT_TYPE_KUBERNETES and
-                    subcloud_status.sync_status ==
-                    consts.SYNC_STATUS_OUT_OF_SYNC)
+            if force:
+                # run for in-sync and out-of-sync (but not unknown)
+                return (subcloud_status.endpoint_type ==
+                        dcorch_consts.ENDPOINT_TYPE_KUBERNETES and
+                        subcloud_status.sync_status !=
+                        consts.SYNC_STATUS_UNKNOWN)
+            else:
+                return (subcloud_status.endpoint_type ==
+                        dcorch_consts.ENDPOINT_TYPE_KUBERNETES and
+                        subcloud_status.sync_status ==
+                        consts.SYNC_STATUS_OUT_OF_SYNC)
         elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
             if force:
                 # run for in-sync and out-of-sync (but not unknown)
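With this change, kubernetes orchestration mirrors the existing kube-rootca behavior: --force widens the audit filter to include in-sync subclouds while still excluding unknown sync status. Sketched as a standalone predicate (import paths assumed from the surrounding module):

from dcmanager.common import consts
from dcorch.common import consts as dcorch_consts

def kube_subcloud_selected(subcloud_status, force):
    if subcloud_status.endpoint_type != \
            dcorch_consts.ENDPOINT_TYPE_KUBERNETES:
        return False
    if force:
        # in-sync and out-of-sync both qualify; unknown never does
        return subcloud_status.sync_status != consts.SYNC_STATUS_UNKNOWN
    return subcloud_status.sync_status == consts.SYNC_STATUS_OUT_OF_SYNC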
@@ -314,13 +321,18 @@ class SwUpdateManager(manager.Manager):
                         msg='Subcloud %s does not require firmware update'
                             % cloud_name)
             elif strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES:
-                subcloud_status = db_api.subcloud_status_get(
-                    context, subcloud.id, dcorch_consts.ENDPOINT_TYPE_KUBERNETES)
-                if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
-                    raise exceptions.BadRequest(
-                        resource='strategy',
-                        msg='Subcloud %s does not require kubernetes update'
-                            % cloud_name)
+                if force:
+                    # force means we do not care about the status
+                    pass
+                else:
+                    subcloud_status = db_api.subcloud_status_get(
+                        context, subcloud.id,
+                        dcorch_consts.ENDPOINT_TYPE_KUBERNETES)
+                    if subcloud_status.sync_status == consts.SYNC_STATUS_IN_SYNC:
+                        raise exceptions.BadRequest(
+                            resource='strategy',
+                            msg='Subcloud %s does not require kubernetes update'
+                                % cloud_name)
             elif strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
                 if force:
                     # force means we do not care about the status

@@ -356,6 +368,11 @@ class SwUpdateManager(manager.Manager):
                 consts.EXTRA_ARGS_CERT_FILE:
                     payload.get(consts.EXTRA_ARGS_CERT_FILE),
             }
+        elif strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES:
+            extra_args = {
+                consts.EXTRA_ARGS_TO_VERSION:
+                    payload.get(consts.EXTRA_ARGS_TO_VERSION),
+            }
 
         # Don't create a strategy if any of the subclouds is online and the
         # relevant sync status is unknown. Offline subcloud is skipped unless
@@ -14,7 +14,7 @@ PREVIOUS_VERSION = '12.34'
 UPGRADED_VERSION = '56.78'
 
 PREVIOUS_KUBE_VERSION = 'v1.2.3'
-UPGRADED_KUBE_VERSION = 'v1.2.3-a'
+UPGRADED_KUBE_VERSION = 'v1.2.4'
 
 FAKE_VENDOR = '8086'
 FAKE_DEVICE = '0b30'
@@ -1,21 +0,0 @@
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from dcmanager.common import consts
-from dcmanager.tests.unit.orchestrator.states.kube.test_base \
-    import TestKubeUpgradeState
-from dcmanager.tests.unit.orchestrator.states.test_applying_vim_strategy \
-    import ApplyingVIMStrategyMixin
-
-
-class TestApplyingVIMPatchStrategyStage(ApplyingVIMStrategyMixin,
-                                        TestKubeUpgradeState):
-    """This test applies the patch vim strategy during kube upgrade"""
-
-    def setUp(self):
-        super(TestApplyingVIMPatchStrategyStage, self).setUp()
-        self.set_state(
-            consts.STRATEGY_STATE_KUBE_APPLYING_VIM_PATCH_STRATEGY,
-            consts.STRATEGY_STATE_KUBE_DELETING_VIM_PATCH_STRATEGY)
@@ -7,6 +7,10 @@ import mock
 
 from dcmanager.common import consts
 from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeVersion
+from dcmanager.tests.unit.orchestrator.states.fakes \
+    import PREVIOUS_KUBE_VERSION
+from dcmanager.tests.unit.orchestrator.states.fakes \
+    import UPGRADED_KUBE_VERSION
 from dcmanager.tests.unit.orchestrator.states.kube.test_base \
     import TestKubeUpgradeState
 from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy \

@@ -23,7 +27,19 @@ class TestCreatingVIMKubeUpgradeStrategyStage(CreatingVIMStrategyStageMixin,
             consts.STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
             consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY)
 
+        # creating the vim strategy checks if an existing upgrade exists
+        self.sysinv_client.get_kube_upgrades = mock.MagicMock()
+        self.sysinv_client.get_kube_upgrades.return_value = []
+
+        # when no vim strategy exists, the available version is used
         self.sysinv_client.get_kube_versions = mock.MagicMock()
         self.sysinv_client.get_kube_versions.return_value = [
-            FakeKubeVersion(),
+            FakeKubeVersion(obj_id=1,
+                            version=PREVIOUS_KUBE_VERSION,
+                            target=True,
+                            state='active'),
+            FakeKubeVersion(obj_id=2,
+                            version=UPGRADED_KUBE_VERSION,
+                            target=False,
+                            state='available'),
         ]
@@ -1,21 +0,0 @@
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-from dcmanager.common import consts
-from dcmanager.tests.unit.orchestrator.states.kube.test_base \
-    import TestKubeUpgradeState
-from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy \
-    import CreatingVIMStrategyStageMixin
-
-
-class TestCreatingVIMPatchStrategyStage(CreatingVIMStrategyStageMixin,
-                                        TestKubeUpgradeState):
-    """Test a VIM Patch Strategy during Kube upgrade orchestration"""
-
-    def setUp(self):
-        super(TestCreatingVIMPatchStrategyStage, self).setUp()
-        self.set_state(
-            consts.STRATEGY_STATE_KUBE_CREATING_VIM_PATCH_STRATEGY,
-            consts.STRATEGY_STATE_KUBE_APPLYING_VIM_PATCH_STRATEGY)
@@ -1,105 +0,0 @@
-#
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-import mock
-
-from dccommon.drivers.openstack import vim
-from dcmanager.common import consts
-from dcmanager.tests.unit.fakes import FakeVimStrategy
-from dcmanager.tests.unit.orchestrator.states.kube.test_base \
-    import TestKubeUpgradeState
-
-# We permit deleting a strategy that has completed or failed its action
-DELETABLE_STRATEGY = FakeVimStrategy(state=vim.STATE_APPLIED)
-
-# Not permitted to delete a strategy while it is partway through its action:
-# 'BUILDING, APPLYING, ABORTING
-UNDELETABLE_STRATEGY = FakeVimStrategy(state=vim.STATE_APPLYING)
-
-
-class TestKubeDeletingVimPatchStrategyStage(TestKubeUpgradeState):
-    "Test deleting the vim patch strategy during kube orch."""
-
-    def setUp(self):
-        super(TestKubeDeletingVimPatchStrategyStage, self).setUp()
-
-        self.on_success_state = \
-            consts.STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
-
-        # Add the subcloud being processed by this unit test
-        self.subcloud = self.setup_subcloud()
-
-        # Add the strategy_step state being processed by this unit test
-        self.strategy_step = self.setup_strategy_step(
-            consts.STRATEGY_STATE_KUBE_DELETING_VIM_PATCH_STRATEGY)
-
-        # Add mock API endpoints for client calls invcked by this state
-        self.vim_client.get_strategy = mock.MagicMock()
-        self.vim_client.delete_strategy = mock.MagicMock()
-
-    def test_success_no_strategy_exists(self):
-        """If there is no vim strategy, success. Skip to next state"""
-
-        # Mock that there is no strategy to delete
-        self.vim_client.get_strategy.return_value = None
-
-        # invoke the strategy state operation on the orch thread
-        self.worker.perform_state_action(self.strategy_step)
-
-        # verify the vim strategy delete was never invoked
-        self.vim_client.delete_strategy.assert_not_called()
-
-        # On success it should proceed to next state
-        self.assert_step_updated(self.strategy_step.subcloud_id,
-                                 self.on_success_state)
-
-    def test_success_strategy_exists(self):
-        """If there is a deletable strategy, delete and go to next state"""
-
-        # Mock that there is a strategy to delete
-        self.vim_client.get_strategy.return_value = DELETABLE_STRATEGY
-
-        # invoke the strategy state operation on the orch thread
-        self.worker.perform_state_action(self.strategy_step)
-
-        # verify the vim strategy delete was invoked
-        self.vim_client.delete_strategy.assert_called()
-
-        # On success it should proceed to next state
-        self.assert_step_updated(self.strategy_step.subcloud_id,
-                                 self.on_success_state)
-
-    def test_failure_strategy_undeletable(self):
-        """If there is a strategy that is in progress, cannot delete. Fail"""
-
-        # Mock that there is a strategy to delete that is still running
-        self.vim_client.get_strategy.return_value = UNDELETABLE_STRATEGY
-
-        # invoke the strategy state operation on the orch thread
-        self.worker.perform_state_action(self.strategy_step)
-
-        # verify the vim strategy delete was not invoked
-        self.vim_client.delete_strategy.assert_not_called()
-
-        # The strategy was in an un-deletable state, so this should have failed
-        self.assert_step_updated(self.strategy_step.subcloud_id,
-                                 consts.STRATEGY_STATE_FAILED)
-
-    def test_failure_vim_api_failure(self):
-        """If delete strategy raises an exception, Fail."""
-
-        # Mock that there is a strategy to delete
-        self.vim_client.get_strategy.return_value = DELETABLE_STRATEGY
-
-        # Mock that the delete API call raises an exception
-        self.vim_client.delete_strategy.side_effect = \
-            Exception("vim delete strategy failed for some reason")
-
-        # invoke the strategy state operation on the orch thread
-        self.worker.perform_state_action(self.strategy_step)
-
-        # The strategy was in an un-deletable state, so this should have failed
-        self.assert_step_updated(self.strategy_step.subcloud_id,
-                                 consts.STRATEGY_STATE_FAILED)
@@ -0,0 +1,316 @@
|
|||||||
|
#
|
||||||
|
# Copyright (c) 2020 Wind River Systems, Inc.
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
import mock
|
||||||
|
|
||||||
|
from dcmanager.common.consts import DEPLOY_STATE_DONE
|
||||||
|
from dcmanager.common.consts import STRATEGY_STATE_COMPLETE
|
||||||
|
from dcmanager.common.consts import STRATEGY_STATE_FAILED
|
||||||
|
from dcmanager.common.consts \
|
||||||
|
import STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
|
||||||
|
from dcmanager.common.consts import STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK
|
||||||
|
from dcmanager.db.sqlalchemy import api as db_api
|
||||||
|
|
||||||
|
from dcmanager.tests.unit.common import fake_strategy
|
||||||
|
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeUpgrade
|
||||||
|
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeVersion
|
||||||
|
from dcmanager.tests.unit.orchestrator.states.fakes \
|
||||||
|
import PREVIOUS_KUBE_VERSION
|
||||||
|
from dcmanager.tests.unit.orchestrator.states.fakes \
|
||||||
|
import UPGRADED_KUBE_VERSION
|
||||||
|
from dcmanager.tests.unit.orchestrator.states.kube.test_base \
|
||||||
|
import TestKubeUpgradeState
|
||||||
|
|
||||||
|
|
||||||
|
class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(TestKubeUpgradePreCheckStage, self).setUp()
|
||||||
|
|
||||||
|
# Add the subcloud being processed by this unit test
|
||||||
|
# The subcloud is online, managed with deploy_state 'installed'
|
||||||
|
self.subcloud = self.setup_subcloud()
|
||||||
|
|
||||||
|
# Add the strategy_step state being processed by this unit test
|
||||||
|
self.strategy_step = \
|
||||||
|
self.setup_strategy_step(STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK)
|
||||||
|
|
||||||
|
# mock there not being a kube upgrade in progress
|
||||||
|
self.sysinv_client.get_kube_upgrades = mock.MagicMock()
|
||||||
|
self.sysinv_client.get_kube_upgrades.return_value = []
|
||||||
|
|
||||||
|
# mock the get_kube_versions calls
|
||||||
|
self.sysinv_client.get_kube_versions = mock.MagicMock()
|
||||||
|
self.sysinv_client.get_kube_versions.return_value = []
|
||||||
|
|
||||||
|
def test_pre_check_subcloud_existing_upgrade(self):
|
||||||
|
"""Test pre check step where the subcloud has a kube upgrade
|
||||||
|
|
||||||
|
When a kube upgrade exists in the subcloud, do not skip, go to the
|
||||||
|
next step, which is 'create the vim kube upgrade strategy'
|
||||||
|
"""
|
||||||
|
next_state = STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
|
||||||
|
# Update the subcloud to have deploy state as "complete"
|
||||||
|
db_api.subcloud_update(self.ctx,
|
||||||
|
self.subcloud.id,
|
||||||
|
deploy_status=DEPLOY_STATE_DONE)
|
||||||
|
self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()]
|
||||||
|
# get kube versions invoked only for the system controller
|
||||||
|
self.sysinv_client.get_kube_versions.return_value = [
|
||||||
|
FakeKubeVersion(obj_id=1,
|
||||||
|
version=UPGRADED_KUBE_VERSION,
|
||||||
|
target=True,
|
||||||
|
state='active'),
|
||||||
|
]
|
||||||
|
|
||||||
|
# invoke the strategy state operation on the orch thread
|
||||||
|
self.worker.perform_state_action(self.strategy_step)
|
||||||
|
|
||||||
|
# Verify the single query (for the system controller)
|
||||||
|
self.sysinv_client.get_kube_versions.assert_called_once()
|
||||||
|
|
||||||
|
# Verify the transition to the expected next state
|
||||||
|
self.assert_step_updated(self.strategy_step.subcloud_id, next_state)
|
||||||
|
|
||||||
|
def test_pre_check_no_sys_controller_active_version(self):
|
||||||
|
"""Test pre check step where system controller has no active version
|
||||||
|
|
||||||
|
The subcloud has no existing kube upgrade.
|
||||||
|
There is no 'to-version' indicated in extra args.
|
||||||
|
The target version is derived from the system controller. Inability
|
||||||
|
to query that version should fail orchestration.
|
||||||
|
"""
|
||||||
|
next_state = STRATEGY_STATE_FAILED
|
||||||
|
# Update the subcloud to have deploy state as "complete"
|
||||||
|
db_api.subcloud_update(self.ctx,
|
||||||
|
self.subcloud.id,
|
||||||
|
deploy_status=DEPLOY_STATE_DONE)
|
||||||
|
|
||||||
|
# No extra args / to-version in the database
|
||||||
|
# Query system controller kube versions
|
||||||
|
# override the first get, so that there is no active release
|
||||||
|
# 'partial' indicates the system controller is still upgrading
|
||||||
|
self.sysinv_client.get_kube_versions.return_value = [
|
||||||
|
FakeKubeVersion(obj_id=1,
|
||||||
|
version=PREVIOUS_KUBE_VERSION,
|
||||||
|
target=True,
|
||||||
|
state='partial'),
|
||||||
|
FakeKubeVersion(obj_id=2,
|
||||||
|
version=UPGRADED_KUBE_VERSION,
|
||||||
|
target=False,
|
||||||
|
state='unavailable'),
|
||||||
|
]
|
||||||
|
# invoke the strategy state operation on the orch thread
|
||||||
|
self.worker.perform_state_action(self.strategy_step)
|
||||||
|
|
||||||
|
# Verify the expected next state happened
|
||||||
|
self.assert_step_updated(self.strategy_step.subcloud_id, next_state)
|
||||||
|
|
||||||
|
def test_pre_check_no_subcloud_available_version(self):
|
||||||
|
"""Test pre check step where subcloud has no available version
|
||||||
|
|
||||||
|
This test simulates a fully upgraded system controller and subcloud.
|
||||||
|
In practice, the audit should not have added this subcloud to orch.
|
||||||
|
|
||||||
|
Setup:
|
||||||
|
- The subcloud has no existing kube upgrade.
|
||||||
|
- There is no 'to-version' indicated in extra args.
|
||||||
|
- System Controller has an 'active' version
|
||||||
|
- Subcloud has no 'available' version.
|
||||||
|
Expectation:
|
||||||
|
- Skip orchestration, jump to 'complete' for this state.
|
||||||
|
"""
|
||||||
|
# Update the subcloud to have deploy state as "complete"
|
||||||
|
db_api.subcloud_update(self.ctx,
|
||||||
|
self.subcloud.id,
|
||||||
|
deploy_status=DEPLOY_STATE_DONE)
|
||||||
|
|
||||||
|
# No extra args / to-version in the database
|
||||||
|
# Query system controller kube versions
|
||||||
|
self.sysinv_client.get_kube_versions.side_effect = [
|
||||||
|
[ # first list: (system controller) has an active release
|
||||||
|
FakeKubeVersion(obj_id=1,
|
||||||
|
version=PREVIOUS_KUBE_VERSION,
|
||||||
|
target=False,
|
||||||
|
state='unavailable'),
|
||||||
|
FakeKubeVersion(obj_id=2,
|
||||||
|
version=UPGRADED_KUBE_VERSION,
|
||||||
|
target=True,
|
||||||
|
state='active'),
|
||||||
|
],
|
||||||
|
[ # second list: (subcloud) fully upgraded (no available release)
|
||||||
|
FakeKubeVersion(obj_id=1,
|
||||||
|
version=PREVIOUS_KUBE_VERSION,
|
||||||
|
target=False,
|
||||||
|
state='unavailable'),
|
||||||
|
FakeKubeVersion(obj_id=2,
|
||||||
|
version=UPGRADED_KUBE_VERSION,
|
||||||
|
target=True,
|
||||||
|
state='active'),
|
||||||
|
],
|
||||||
|
]
|
||||||
|
# fully upgraded subcloud. Next state will be complete.
|
||||||
|
next_state = STRATEGY_STATE_COMPLETE
|
||||||
|
|
||||||
|
# invoke the strategy state operation on the orch thread
|
||||||
|
self.worker.perform_state_action(self.strategy_step)
|
||||||
|
|
||||||
|
# get_kube_versions gets called (more than once)
|
||||||
|
self.sysinv_client.get_kube_versions.assert_called()
|
||||||
|
|
||||||
|
# Verify the expected next state happened
|
||||||
|
self.assert_step_updated(self.strategy_step.subcloud_id, next_state)
|
||||||
|
|
||||||
|
def test_pre_check_subcloud_existing_upgrade_resumable(self):
|
||||||
|
"""Test pre check step where the subcloud has lower kube upgrade
|
||||||
|
|
||||||
|
When a kube upgrade exists in the subcloud, it is skipped if to-version
|
||||||
|
if less than its version. This test should not skip the subcloud.
|
||||||
|
"""
|
||||||
|
next_state = STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
|
||||||
|
# Update the subcloud to have deploy state as "complete"
|
||||||
|
db_api.subcloud_update(self.ctx,
|
||||||
|
self.subcloud.id,
|
||||||
|
deploy_status=DEPLOY_STATE_DONE)
|
||||||
|
|
||||||
|
low_version = "v1.2.3"
|
||||||
|
high_partial_version = "v1.3"
|
||||||
|
|
||||||
|
self.sysinv_client.get_kube_upgrades.return_value = [
|
||||||
|
FakeKubeUpgrade(to_version=low_version)
|
||||||
|
]
|
||||||
|
|
||||||
|
# The orchestrated version target is higher than the version of the
|
||||||
|
# existing upgrade in the subcloud, so the subcloud upgrade should
|
||||||
|
# continue
|
||||||
|
extra_args = {"to-version": high_partial_version}
|
||||||
|
self.strategy = fake_strategy.create_fake_strategy(
|
||||||
|
self.ctx,
|
||||||
|
self.DEFAULT_STRATEGY_TYPE,
|
||||||
|
extra_args=extra_args)
|
||||||
|
|
||||||
|
# invoke the strategy state operation on the orch thread
|
||||||
|
self.worker.perform_state_action(self.strategy_step)
|
||||||
|
|
||||||
|
# Do not need to mock query kube versions since extra args will be
|
||||||
|
# queried to get the info for the system controller
|
||||||
|
# and pre-existing upgrade is used for subcloud
|
||||||
|
self.sysinv_client.get_kube_versions.assert_not_called()
|
||||||
|
|
||||||
|
# Verify the transition to the expected next state
|
||||||
|
self.assert_step_updated(self.strategy_step.subcloud_id, next_state)
|
||||||
|
|
||||||
|
def _test_pre_check_subcloud_existing_upgrade_skip(self,
|
||||||
|
target_version,
|
||||||
|
subcloud_version):
|
||||||
|
"""Test pre check step where the subcloud existing upgrade too high.
|
||||||
|
|
||||||
|
When a kube upgrade exists in the subcloud, it is skipped if to-version
|
||||||
|
is less than the version of the existing upgrade.
|
||||||
|
For this test, the subcloud version is higher than the target, so
|
||||||
|
it should not be resumed and the skip should occur.
|
||||||
|
"""
|
||||||
|
next_state = STRATEGY_STATE_COMPLETE
|
||||||
|
# Update the subcloud to have deploy state as "complete"
|
||||||
|
db_api.subcloud_update(self.ctx,
|
||||||
|
self.subcloud.id,
|
||||||
|
deploy_status=DEPLOY_STATE_DONE)
|
||||||
|
|
||||||
|
self.sysinv_client.get_kube_upgrades.return_value = [
|
||||||
|
FakeKubeUpgrade(to_version=subcloud_version)
|
||||||
|
]
|
||||||
|
|
||||||
|
extra_args = {"to-version": target_version}
|
||||||
|
self.strategy = fake_strategy.create_fake_strategy(
|
||||||
|
self.ctx,
|
||||||
|
self.DEFAULT_STRATEGY_TYPE,
|
||||||
|
extra_args=extra_args)
|
||||||
|
|
||||||
|
# invoke the strategy state operation on the orch thread
|
||||||
|
self.worker.perform_state_action(self.strategy_step)
|
||||||
|
|
||||||
|
# Do not need to mock query kube versions since extra args will be
|
||||||
|
# queried to get the info for the system controller
|
||||||
|
# and pre-existing upgrade is used for subcloud
|
||||||
|
self.sysinv_client.get_kube_versions.assert_not_called()
|
||||||
|
|
||||||
|
# Verify the transition to the expected next state
|
||||||
|
self.assert_step_updated(self.strategy_step.subcloud_id, next_state)
|
||||||
|
|
||||||
|
def test_pre_check_subcloud_existing_upgrade_too_high(self):
|
||||||
|
target_version = "v1.2.1"
|
||||||
|
subcloud_version = "v1.3.3"
|
||||||
|
self._test_pre_check_subcloud_existing_upgrade_skip(target_version,
|
||||||
|
subcloud_version)
|
||||||
|
|
||||||
|
def test_pre_check_subcloud_existing_upgrade_too_high_target_partial(self):
|
||||||
|
target_version = "v1.2"
|
||||||
|
subcloud_version = "v1.3.3"
|
||||||
|
self._test_pre_check_subcloud_existing_upgrade_skip(target_version,
|
||||||
|
subcloud_version)
|
||||||
|
|
||||||
|
def test_pre_check_subcloud_existing_upgrade_too_high_subcl_partial(self):
|
||||||
|
target_version = "v1.2.1"
|
||||||
|
subcloud_version = "v1.3"
|
||||||
|
self._test_pre_check_subcloud_existing_upgrade_skip(target_version,
|
||||||
|
subcloud_version)
|
||||||
|
|
||||||
|
    def _test_pre_check_subcloud_existing_upgrade_resume(self,
                                                         target_version,
                                                         subcloud_version):
        """Test pre check step where target version >= existing upgrade.

        When a kube upgrade exists in the subcloud, it is resumed if the
        to-version is the same or higher. The to-version can be a partial
        version. This test supports partial values for target_version and
        subcloud_version.
        """
        next_state = STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
        # Update the subcloud to have deploy state as "complete"
        db_api.subcloud_update(self.ctx,
                               self.subcloud.id,
                               deploy_status=DEPLOY_STATE_DONE)

        # Setup a fake kube upgrade in progress
        self.sysinv_client.get_kube_upgrades.return_value = [
            FakeKubeUpgrade(to_version=subcloud_version)
        ]

        # Setup a fake kube upgrade strategy with the to-version specified
        extra_args = {"to-version": target_version}
        self.strategy = fake_strategy.create_fake_strategy(
            self.ctx,
            self.DEFAULT_STRATEGY_TYPE,
            extra_args=extra_args)

        # invoke the strategy state operation on the orch thread
        self.worker.perform_state_action(self.strategy_step)

        # No need to mock get_kube_versions: the to-version extra arg
        # supplies the info for the system controller, and the pre-existing
        # upgrade is used for the subcloud
        self.sysinv_client.get_kube_versions.assert_not_called()

        # Verify the transition to the expected next state
        self.assert_step_updated(self.strategy_step.subcloud_id, next_state)
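
Combining the helpers above, the branch the skip and resume tests exercise can be pictured as the following simplified decision. The function shape and the use of kube_version_compare are assumptions for illustration, not the real pre-check state code:

    def choose_next_state(to_version, kube_upgrades):
        # kube_upgrades: result of sysinv get_kube_upgrades() for the subcloud
        if kube_upgrades:
            existing = kube_upgrades[0].to_version
            if kube_version_compare(to_version, existing) >= 0:
                # same or higher target: resume the existing upgrade
                return STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
            # target is older than the in-progress upgrade: nothing to do
            return STRATEGY_STATE_COMPLETE
        # no upgrade in progress: start a fresh one
        return STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
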
    def test_pre_check_subcloud_existing_upgrade_match(self):
        target_version = "v1.2.3"
        subcloud_version = "v1.2.3"
        self._test_pre_check_subcloud_existing_upgrade_resume(target_version,
                                                              subcloud_version)

    def test_pre_check_subcloud_existing_upgrade_match_target_partial(self):
        # v1.2 is considered the same as v1.2.3 (micro version gets ignored)
        target_version = "v1.2"
        subcloud_version = "v1.2.3"
        self._test_pre_check_subcloud_existing_upgrade_resume(target_version,
                                                              subcloud_version)

    def test_pre_check_subcloud_existing_upgrade_match_subcloud_partial(self):
        # v1.2 is considered the same as v1.2.3 (micro version gets ignored)
        target_version = "v1.2.3"
        subcloud_version = "v1.2"
        self._test_pre_check_subcloud_existing_upgrade_resume(target_version,
                                                              subcloud_version)

@@ -1,184 +0,0 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock
from os import path as os_path

from dcmanager.common import consts
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeVersion
from dcmanager.tests.unit.orchestrator.states.fakes import FakeLoad
from dcmanager.tests.unit.orchestrator.states.kube.test_base \
    import TestKubeUpgradeState


FAKE_LOAD_VERSION = '12.34'
DIFFERENT_LOAD_VERSION = '12.35'


class TestKubeUpdatingPatchesStage(TestKubeUpgradeState):
    """Test uploading and applying the patches required for kube orch."""

    def setUp(self):
        super(TestKubeUpdatingPatchesStage, self).setUp()

        # next state after updating patches is creating a vim patch strategy
        self.on_success_state = \
            consts.STRATEGY_STATE_KUBE_CREATING_VIM_PATCH_STRATEGY

        # Add the subcloud being processed by this unit test
        self.subcloud = self.setup_subcloud()

        # Add the strategy_step state being processed by this unit test
        self.strategy_step = self.setup_strategy_step(
            consts.STRATEGY_STATE_KUBE_UPDATING_PATCHES)

        # Add mock API endpoints for clients invoked by this state
        self.patching_client.query = mock.MagicMock()
        self.patching_client.query_hosts = mock.MagicMock()
        self.patching_client.upload = mock.MagicMock()
        self.patching_client.apply = mock.MagicMock()
        self.sysinv_client.get_loads = mock.MagicMock()
        self.sysinv_client.get_kube_version = mock.MagicMock()
        self.sysinv_client.get_kube_versions = mock.MagicMock()

        # Mock default results for APIs
        self.sysinv_client.get_loads.side_effect = [
            [FakeLoad(1,
                      software_version=FAKE_LOAD_VERSION,
                      state=consts.ACTIVE_LOAD_STATE)]
        ]

        self.sysinv_client.get_kube_version.return_value = FakeKubeVersion()
        self.sysinv_client.get_kube_versions.return_value = [
            FakeKubeVersion(),
        ]
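
For reference, a bare-bones stand-in for the fake used by get_loads above might look like the following; this is a hypothetical reconstruction, since the real FakeLoad in the shared fakes module carries more fields:

    class MinimalFakeLoad(object):
        # just the attributes this setUp exercises (illustrative only)
        def __init__(self, obj_id, software_version='12.34', state='active'):
            self.id = obj_id
            self.software_version = software_version
            self.state = state
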
    def test_success_no_patches(self):
        """Test behaviour when there are no region one patches.

        The state machine should simply skip to the next state.
        """
        REGION_ONE_PATCHES = {}
        SUBCLOUD_PATCHES = {}

        # patching client queries region one patches and then subcloud patches
        self.patching_client.query.side_effect = [
            REGION_ONE_PATCHES,
            SUBCLOUD_PATCHES,
        ]
        # hosts are queried to determine which patches are applied
        self.patching_client.query_hosts.return_value = []

        # invoke the strategy state operation on the orch thread
        self.worker.perform_state_action(self.strategy_step)

        # On success, the state should transition to the next state
        self.assert_step_updated(self.strategy_step.subcloud_id,
                                 self.on_success_state)
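
The two-element side_effect above relies on standard mock behaviour: a mock configured with an iterable returns successive items on successive calls. A standalone illustration:

    import mock

    query = mock.MagicMock(side_effect=[{'DC.1': {}}, {}])
    assert query() == {'DC.1': {}}  # first call: region one patches
    assert query() == {}            # second call: subcloud patches
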
    def test_success_no_patches_matching_load(self):
        """Test behaviour when no region one patches match the load.

        The state machine should simply skip to the next state.
        """
        REGION_ONE_PATCHES = {
            'DC.1': {'sw_version': DIFFERENT_LOAD_VERSION,
                     'repostate': 'Applied',
                     'patchstate': 'Applied'},
        }
        SUBCLOUD_PATCHES = {}

        # patching client queries region one patches and then subcloud patches
        self.patching_client.query.side_effect = [
            REGION_ONE_PATCHES,
            SUBCLOUD_PATCHES,
        ]
        # hosts are queried to determine which patches are applied
        self.patching_client.query_hosts.return_value = []

        # invoke the strategy state operation on the orch thread
        self.worker.perform_state_action(self.strategy_step)

        # On success, the state should transition to the next state
        self.assert_step_updated(self.strategy_step.subcloud_id,
                                 self.on_success_state)
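
The DIFFERENT_LOAD_VERSION case passes because the state only acts on patches built for the subcloud's load. A sketch of that filter, with the real audit logic in the state assumed to be more involved:

    def applicable_patches(region_one_patches, load_version):
        # drop patches whose sw_version does not match the subcloud load
        return {patch_id: patch
                for patch_id, patch in region_one_patches.items()
                if patch['sw_version'] == load_version}
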
    @mock.patch.object(os_path, 'isfile')
    def test_success_subcloud_needs_patch(self, mock_os_path_isfile):
        """Test behaviour when a region one patch is not on the subcloud.

        The state machine should upload and apply the patch and proceed
        to the next state.
        :param mock_os_path_isfile: Mocking the file existence check for
            the vault directory.
        """
        # Mock that the patch is checked into the vault on disk
        mock_os_path_isfile.return_value = True

        REGION_ONE_PATCHES = {
            'DC.1': {'sw_version': FAKE_LOAD_VERSION,
                     'repostate': 'Applied',
                     'patchstate': 'Applied'},
        }
        SUBCLOUD_PATCHES = {}

        # patching client queries region one patches and then subcloud patches
        self.patching_client.query.side_effect = [
            REGION_ONE_PATCHES,
            SUBCLOUD_PATCHES,
        ]

        # hosts are queried to determine which patches are applied
        self.patching_client.query_hosts.return_value = []

        # invoke the strategy state operation on the orch thread
        self.worker.perform_state_action(self.strategy_step)

        # On success, the state should transition to the next state
        self.assert_step_updated(self.strategy_step.subcloud_id,
                                 self.on_success_state)
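
The pass/fail split between this test and the next comes down to whether the patch file exists in the DC vault, which is what the mocked os_path.isfile stands in for. A rough sketch of such a check, with the vault path and helper name assumed for illustration:

    from os import path as os_path

    PATCH_VAULT_DIR = '/opt/dc-vault/patches'  # assumed location

    def patch_in_vault(sw_version, patch_id):
        # e.g. /opt/dc-vault/patches/12.34/DC.1.patch (illustrative layout)
        patch_file = os_path.join(PATCH_VAULT_DIR, sw_version,
                                  patch_id + '.patch')
        return os_path.isfile(patch_file)
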
    @mock.patch.object(os_path, 'isfile')
    def test_fail_subcloud_needs_patch_not_in_vault(self, mock_os_path_isfile):
        """Test behaviour when a required patch is missing from the vault.

        The patch cannot be uploaded to the subcloud, so the state machine
        should fail this state.
        :param mock_os_path_isfile: Mocking the file existence check for
            the vault directory.
        """
        # Mock that the patch file is missing from the vault
        mock_os_path_isfile.return_value = False

        REGION_ONE_PATCHES = {
            'DC.1': {'sw_version': FAKE_LOAD_VERSION,
                     'repostate': 'Applied',
                     'patchstate': 'Applied'},
        }
        SUBCLOUD_PATCHES = {}

        # patching client queries region one patches and then subcloud patches
        self.patching_client.query.side_effect = [
            REGION_ONE_PATCHES,
            SUBCLOUD_PATCHES,
        ]

        # hosts are queried to determine which patches are applied
        self.patching_client.query_hosts.return_value = []

        # invoke the strategy state operation on the orch thread
        self.worker.perform_state_action(self.strategy_step)

        # A required patch was not in the vault. Fail this state
        self.assert_step_updated(self.strategy_step.subcloud_id,
                                 consts.STRATEGY_STATE_FAILED)