Changes to stx-openstack application automatic re-apply behaviour

The stx-openstack application is no longer automatically reapplied
on node unlock. The new behaviour is handled with a reapply flag:

 - When a node is unlocked or a runtime manifest is applied, the
   application overrides are regenerated and compared to their old
   values. If there is a difference, a reapply flag is raised along
   with a warning alarm
 - The kubernetes audit in the sysinv conductor now checks whether the
   reapply flag has been raised and triggers a reapply if the system
   is in a stable state (no hosts currently locking/unlocking/booting)
 - This check is also done when a runtime manifest reports success
   (a condensed sketch of this flow follows below)
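
For orientation, a condensed sketch of the flow above. This is
illustrative only: hash_overrides, regenerate_overrides, nodes_stable,
do_reapply and REAPPLY_FLAG are stand-in names, not the sysinv
identifiers; the real implementation is in the conductor changes below.

import hashlib
import os

# Illustrative stand-in for constants.APP_OPENSTACK_PENDING_REAPPLY_FLAG
REAPPLY_FLAG = "/tmp/.openstack_app_reapply"

def hash_overrides(paths):
    # md5 each generated override file so any content change is visible
    return {p: hashlib.md5(open(p, "rb").read()).hexdigest() for p in paths}

def evaluate_app_reapply(paths, regenerate_overrides):
    # On unlock or runtime-manifest apply: regenerate the overrides and
    # raise the reapply flag (a warning alarm is raised alongside it)
    # if any of them changed.
    old = hash_overrides(paths)
    regenerate_overrides()
    if hash_overrides(paths) != old:
        open(REAPPLY_FLAG, "w").close()

def audit_step(nodes_stable, do_reapply):
    # From the periodic kubernetes audit and on successful runtime
    # manifests: consume the flag only when no host is locking,
    # unlocking or booting; otherwise it is retried on the next audit.
    if os.path.isfile(REAPPLY_FLAG) and nodes_stable():
        os.remove(REAPPLY_FLAG)  # the pending-reapply alarm is cleared too
        do_reapply()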

Test cases:
AIO-SX, AIO-DX, and Standard:
 - When a lock/unlock is done with no changes, the application is
   not reapplied
 - When a lock/unlock is done after a config change, the application
   waits until after the unlock and then triggers a reapply
Standard:
 - Enabled ceph-rgw chart and ensured that the application was reapplied upon
   config success (likewise for chart disable)
 - If there is a pending reapply and the user triggers it manually
   before the system is stable, the reapply flag and alarm are removed
 - Provisioning a new compute node and unlocking it for the
   first time triggers an application reapply after it comes online
 - App is reapplied when a node is deleted
 - Compute added without node labels and unlocked results in no reapply
 - Compute locked, labels applied, then unlocked results in a reapply;
   pods launch on the compute only when labels are present (likewise
   for label removal)
 - Pending reapply flag and alarm persist over a controller swact

Change-Id: I1ae9fdc2afcdf831cf0e7d96f8af14fcb5f6b579
Closes-Bug: 1837750
Depends-On: https://review.opendev.org/677845
Signed-off-by: Tyler Smith <tyler.smith@windriver.com>
Author: Tyler Smith, 2019-08-21 18:07:34 -04:00
Parent: 024739e11b
Commit: b1895200a4
9 changed files with 258 additions and 69 deletions


@@ -2029,6 +2029,16 @@ class HostController(rest.RestController):
ihost_ret = pecan.request.rpcapi.configure_ihost(
pecan.request.context, ihost_obj)
# Trigger a system app reapply if the host has been unlocked.
# Only trigger the reapply if it is not during restore and the
# openstack app is applied
if (cutils.is_openstack_applied(pecan.request.dbapi) and
not os.path.isfile(tsc.RESTORE_IN_PROGRESS_FLAG) and
patched_ihost.get('action') in
[constants.UNLOCK_ACTION, constants.FORCE_UNLOCK_ACTION]):
pecan.request.rpcapi.evaluate_app_reapply(
pecan.request.context)
pecan.request.dbapi.ihost_update(
ihost_obj['uuid'], {'capabilities': ihost_obj['capabilities']})
@@ -2075,15 +2085,6 @@ class HostController(rest.RestController):
new_ihost_mtc,
constants.MTC_DEFAULT_TIMEOUT_IN_SECS)
elif new_ihost_mtc['operation'] == 'modify':
if utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX:
if new_ihost_mtc['action'] == constants.UNLOCK_ACTION:
if cutils.is_openstack_applied(pecan.request.dbapi):
# Unlock action on a simplex with stx-openstack
# applied, should remove the unlock ready flag
# because the application should be reapplied
# before unlock
LOG.info("Remove unlock ready flag")
self._remove_unlock_ready_flag()
mtc_response = mtce_api.host_modify(
self._api_token, self._mtc_address, self._mtc_port,
new_ihost_mtc,
@@ -2136,13 +2137,6 @@ class HostController(rest.RestController):
ihost_obj['uuid'],
ibm_msg_dict)
# Trigger a system app reapply if the host has been unlocked.
# Only trigger the reapply if it is not during restore.
if (not os.path.isfile(tsc.RESTORE_IN_PROGRESS_FLAG) and
patched_ihost.get('action') in
[constants.UNLOCK_ACTION, constants.FORCE_UNLOCK_ACTION]):
self._reapply_system_app()
elif mtc_response['status'] is None:
raise wsme.exc.ClientSideError(
_("Timeout waiting for maintenance response. "
@@ -2500,35 +2494,10 @@ class HostController(rest.RestController):
pecan.request.dbapi.ihost_destroy(ihost_id)
# If the host being removed was an openstack worker node, trigger
# a reapply
# If the host being removed was an openstack worker node, check to see
# if a reapply is needed
if openstack_worker:
self._reapply_system_app()
def _remove_unlock_ready_flag(self):
pecan.request.rpcapi.remove_unlock_ready_flag(
pecan.request.context)
def _reapply_system_app(self):
try:
db_app = objects.kube_app.get_by_name(
pecan.request.context, constants.HELM_APP_OPENSTACK)
if db_app.status == constants.APP_APPLY_SUCCESS:
LOG.info(
"Reapplying the %s app" % constants.HELM_APP_OPENSTACK)
db_app.status = constants.APP_APPLY_IN_PROGRESS
db_app.progress = None
db_app.save()
pecan.request.rpcapi.perform_app_apply(
pecan.request.context, db_app, mode=None)
else:
LOG.info("%s system app is present but not applied, "
"skipping re-apply" % constants.HELM_APP_OPENSTACK)
except exception.KubeAppNotFound:
LOG.info(
"%s system app not present, skipping re-apply" %
constants.HELM_APP_OPENSTACK)
pecan.request.rpcapi.evaluate_app_reapply(pecan.request.context)
def _check_upgrade_provision_order(self, personality, hostname):
LOG.info("_check_upgrade_provision_order personality=%s, hostname=%s" %


@@ -186,6 +186,9 @@ CONFIG_STATUS_REINSTALL = "Reinstall required"
# when reinstall starts, mtc update the db with task = 'Reinstalling'
TASK_REINSTALLING = "Reinstalling"
TASK_BOOTING = "Booting"
TASK_UNLOCKING = "Unlocking"
TASK_TESTING = "Testing"
HOST_ACTION_STATE = "action_state"
HAS_REINSTALLING = "reinstalling"
@@ -1379,6 +1382,8 @@ APP_INSTALL_ROOT_PATH = '/scratch'
APP_INSTALL_PATH = APP_INSTALL_ROOT_PATH + '/apps'
APP_SYNCED_DATA_PATH = os.path.join(tsc.PLATFORM_PATH, 'armada', tsc.SW_VERSION)
APP_METADATA_FILE = 'metadata.yaml'
APP_OPENSTACK_PENDING_REAPPLY_FLAG = os.path.join(
tsc.ARMADA_PATH, ".openstack_app_reapply")
# State constants
APP_NOT_PRESENT = 'missing'


@@ -1780,6 +1780,20 @@ class AppOperator(object):
fm_constants.FM_ALARM_TYPE_0,
_("No action required."),
True)
# Remove the pending auto re-apply if it is being triggered manually
if (app.name == constants.HELM_APP_OPENSTACK and
os.path.isfile(constants.APP_OPENSTACK_PENDING_REAPPLY_FLAG)):
# Consume the reapply flag
os.remove(constants.APP_OPENSTACK_PENDING_REAPPLY_FLAG)
# Clear the pending automatic reapply alarm
app_alarms = self._fm_api.get_faults_by_id(
fm_constants.FM_ALARM_ID_APPLICATION_REAPPLY_PENDING)
if app_alarms:
self._fm_api.clear_fault(app_alarms[0].alarm_id,
app_alarms[0].entity_instance_id)
LOG.info("Application %s (%s) apply started." % (app.name, app.version))
overrides_str = ''


@@ -33,6 +33,7 @@ import errno
import filecmp
import fnmatch
import glob
import hashlib
import math
import os
import re
@@ -5085,8 +5086,75 @@ class ConductorManager(service.PeriodicService):
pass
if os.path.isfile(constants.APP_OPENSTACK_PENDING_REAPPLY_FLAG):
if self.check_nodes_stable():
LOG.info("Nodes are stable, beginning stx-openstack app "
"automated re-apply")
self.reapply_openstack_app(context)
else:
LOG.info("stx-openstack requires a re-apply but there are "
"currently node(s) in an unstable state. Will "
"retry on next audit")
else:
# Clear any stuck re-apply alarm
app_alarms = self.fm_api.get_faults_by_id(
fm_constants.FM_ALARM_ID_APPLICATION_REAPPLY_PENDING)
if app_alarms:
self.fm_api.clear_fault(app_alarms[0].alarm_id,
app_alarms[0].entity_instance_id)
LOG.debug("Periodic Task: _k8s_application_audit: Finished")
def check_nodes_stable(self):
hosts = self.dbapi.ihost_get_list()
if (utils.is_host_simplex_controller(hosts[0]) and
not hosts[0].vim_progress_status.startswith(
constants.VIM_SERVICES_ENABLED)):
# If the apply is triggered too early on AIO-SX, tiller will not
# be up and cause the re-apply to fail, so wait for services
# to enable
return False
for host in hosts:
if host.availability == constants.AVAILABILITY_INTEST:
return False
task_str = host.task or ""
if (task_str.startswith(constants.TASK_BOOTING) or
task_str.startswith(constants.TASK_TESTING) or
task_str.startswith(constants.TASK_UNLOCKING) or
task_str.startswith(constants.LOCKING) or
task_str.startswith(constants.FORCE_LOCKING)):
return False
return True
def reapply_openstack_app(self, context):
# Consume the reapply flag
os.remove(constants.APP_OPENSTACK_PENDING_REAPPLY_FLAG)
# Clear the pending automatic reapply alarm
app_alarms = self.fm_api.get_faults_by_id(
fm_constants.FM_ALARM_ID_APPLICATION_REAPPLY_PENDING)
if app_alarms:
self.fm_api.clear_fault(app_alarms[0].alarm_id,
app_alarms[0].entity_instance_id)
try:
app = kubeapp_obj.get_by_name(context, constants.HELM_APP_OPENSTACK)
if app.status == constants.APP_APPLY_SUCCESS:
LOG.info(
"Reapplying the %s app" % constants.HELM_APP_OPENSTACK)
app.status = constants.APP_APPLY_IN_PROGRESS
app.save()
greenthread.spawn(self._app.perform_app_apply, app, None)
else:
LOG.info("%s app is present but not applied, "
"skipping re-apply" % constants.HELM_APP_OPENSTACK)
except exception.KubeAppNotFound:
LOG.info(
"%s app not present, skipping re-apply" %
constants.HELM_APP_OPENSTACK)
def get_k8s_namespaces(self, context):
""" Get Kubernetes namespaces
:returns: list of namespaces
@@ -6001,12 +6069,14 @@ class ConductorManager(service.PeriodicService):
return
# Identify the set of manifests executed
success = False
if reported_cfg == puppet_common.REPORT_DISK_PARTITON_CONFIG:
partition_uuid = iconfig['partition_uuid']
host_uuid = iconfig['host_uuid']
idisk_uuid = iconfig['idisk_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_partition_mgmt_success(host_uuid, idisk_uuid,
partition_uuid)
elif status == puppet_common.REPORT_FAILURE:
@@ -6017,6 +6087,7 @@ class ConductorManager(service.PeriodicService):
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_lvm_cinder_config_success(context, host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
@@ -6030,6 +6101,7 @@ class ConductorManager(service.PeriodicService):
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_ceph_config_success(context, host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
@@ -6043,6 +6115,7 @@ class ConductorManager(service.PeriodicService):
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_ceph_external_config_success(context, host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
@@ -6056,6 +6129,7 @@ class ConductorManager(service.PeriodicService):
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_external_config_success(host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
@@ -6069,6 +6143,7 @@ class ConductorManager(service.PeriodicService):
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_ceph_services_config_success(host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
@@ -6082,6 +6157,7 @@ class ConductorManager(service.PeriodicService):
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_ceph_base_config_success(host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
@@ -6096,6 +6172,7 @@ class ConductorManager(service.PeriodicService):
stor_uuid = iconfig['stor_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
self.report_ceph_osd_config_success(host_uuid, stor_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
@@ -6105,11 +6182,26 @@ class ConductorManager(service.PeriodicService):
LOG.error("No match for sysinv-agent manifest application reported! "
"reported_cfg: %(cfg)s status: %(status)s "
"iconfig: %(iconfig)s" % args)
elif reported_cfg == puppet_common.REPORT_CEPH_RADOSGW_CONFIG:
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
success = True
else:
LOG.error("Reported configuration '%(cfg)s' is not handled by"
" report_config_status! iconfig: %(iconfig)s" %
{'iconfig': iconfig, 'cfg': reported_cfg})
if success and \
os.path.isfile(constants.APP_OPENSTACK_PENDING_REAPPLY_FLAG):
if self.check_nodes_stable():
self.reapply_openstack_app(context)
else:
LOG.warning(
"stx-openstack requires a re-apply but could not trigger"
"during successful config report because there are "
"node(s) in an unstable state. Will be reapplied during "
"audit instead.")
def report_partition_mgmt_success(self, host_uuid, idisk_uuid,
partition_uuid):
""" Disk partition management success callback for Sysinv Agent
@@ -6287,7 +6379,10 @@ class ConductorManager(service.PeriodicService):
config_dict = {
"personalities": personalities,
"classes": ['platform::ceph::rgw::keystone::runtime']
"classes": ['platform::ceph::rgw::keystone::runtime'],
puppet_common.REPORT_STATUS_CFG:
puppet_common.REPORT_CEPH_RADOSGW_CONFIG
}
self._config_apply_runtime_manifest(context,
@@ -7047,7 +7142,9 @@ class ConductorManager(service.PeriodicService):
"personalities": personalities,
"classes": ['platform::ceph::rgw::runtime',
'platform::sm::rgw::runtime',
'platform::haproxy::runtime']
'platform::haproxy::runtime'],
puppet_common.REPORT_STATUS_CFG:
puppet_common.REPORT_CEPH_RADOSGW_CONFIG
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
@@ -7981,6 +8078,8 @@ class ConductorManager(service.PeriodicService):
host_uuids=host_uuids,
force=force)
self.evaluate_app_reapply(context)
# Remove reboot required flag in case it's present. Runtime manifests
# are not supposed to clear this flag. A host lock/unlock cycle (or similar)
# should do it.
@@ -10182,15 +10281,77 @@ class ConductorManager(service.PeriodicService):
"""
return self._fernet.get_fernet_keys(key_id)
def remove_unlock_ready_flag(self, context):
"""Remove the unlock ready flag if it exists
def evaluate_app_reapply(self, context):
"""Synchronously, determine whether an stx-openstack application
re-apply is needed, and if so, raise the re-apply flag.
:param context: request context.
"""
try:
os.remove(constants.UNLOCK_READY_FLAG)
except OSError:
pass
app = kubeapp_obj.get_by_name(context,
constants.HELM_APP_OPENSTACK)
app = self._app.Application(app, True)
except exception.KubeAppNotFound:
app = None
if app and app.status == constants.APP_APPLY_SUCCESS:
# Hash the existing overrides
# TODO: these hashes can be stored in the db to reduce overhead and
# to avoid writing the new overrides to disk
old_hash = {}
app.charts = self._app._get_list_of_charts(app.armada_mfile_abs)
(helm_files, armada_files) = self._app._get_overrides_files(
app.overrides_dir, app.charts, app.name, None)
for f in helm_files + armada_files:
with open(f, 'rb') as file:
old_hash[f] = hashlib.md5(file.read()).hexdigest()
# Regenerate overrides and compute new hash
new_hash = {}
app.charts = self._app._get_list_of_charts(app.armada_mfile_abs)
self._helm.generate_helm_application_overrides(
app.overrides_dir, app.name, None, cnamespace=None,
armada_format=True, armada_chart_info=app.charts, combined=True)
(helm_files, armada_files) = self._app._get_overrides_files(
app.overrides_dir, app.charts, app.name, None)
for f in helm_files + armada_files:
with open(f, 'rb') as file:
new_hash[f] = hashlib.md5(file.read()).hexdigest()
if cmp(old_hash, new_hash) != 0:
LOG.info("There has been an overrides change, setting up "
"stx-openstack app reapply")
self._setup_delayed_reapply()
else:
LOG.info("No override change after configuration action, "
"skipping re-apply")
else:
LOG.info("stx-openstack app status does not "
"warrant app re-apply")
def _setup_delayed_reapply(self):
open(constants.APP_OPENSTACK_PENDING_REAPPLY_FLAG, "w").close()
# Raise the pending automatic reapply alarm
entity_instance_id = "%s=%s" % \
(fm_constants.FM_ENTITY_TYPE_APPLICATION,
constants.HELM_APP_OPENSTACK)
fault = fm_api.Fault(
alarm_id=fm_constants.FM_ALARM_ID_APPLICATION_REAPPLY_PENDING,
alarm_state=fm_constants.FM_ALARM_STATE_SET,
entity_type_id=fm_constants.FM_ENTITY_TYPE_APPLICATION,
entity_instance_id=entity_instance_id,
severity=fm_constants.FM_ALARM_SEVERITY_WARNING,
reason_text=_(
"A configuration change requires a reapply of "
"the %s application.") % constants.HELM_APP_OPENSTACK,
alarm_type=fm_constants.FM_ALARM_TYPE_0,
probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN,
proposed_repair_action=_(
"Ensure all hosts are either locked or unlocked. When "
"the system is stable the application will be "
"automatically reapplied."),
service_affecting=False)
self.fm_api.set_fault(fault)
def perform_app_upload(self, context, rpc_app, tarfile):
"""Handling of application upload request (via AppOperator)
@@ -10212,17 +10373,12 @@ class ConductorManager(service.PeriodicService):
was_applied = self._app.is_app_active(rpc_app)
app_applied = self._app.perform_app_apply(rpc_app, mode)
appname = self._app.get_appname(rpc_app)
if constants.HELM_APP_OPENSTACK == appname and app_applied:
if was_applied:
# stx-openstack application was applied, this is a
# reapply action
# generate .unlock_ready flag
cutils.touch(constants.UNLOCK_READY_FLAG)
else:
# apply any runtime configurations that are needed for
# stx_openstack application
self._update_config_for_stx_openstack(context)
self._update_pciirqaffinity_config(context)
if constants.HELM_APP_OPENSTACK == appname and app_applied \
and not was_applied:
# apply any runtime configurations that are needed for
# stx_openstack application
self._update_config_for_stx_openstack(context)
self._update_pciirqaffinity_config(context)
# The radosgw chart may have been enabled/disabled. Regardless of
# the prior apply state, update the ceph config


@@ -1752,12 +1752,13 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
return self.call(context, self.make_msg('get_fernet_keys',
key_id=key_id))
def remove_unlock_ready_flag(self, context):
"""Synchronously, remove the unlock ready flag file.
def evaluate_app_reapply(self, context):
"""Synchronously, determine whether an stx-openstack application
re-apply is needed, and if so, raise the re-apply flag.
:param context: request context.
"""
return self.call(context, self.make_msg('remove_unlock_ready_flag'))
return self.call(context, self.make_msg('evaluate_app_reapply'))
def perform_app_upload(self, context, rpc_app, tarfile):
"""Handle application upload request


@@ -79,9 +79,13 @@ class NeutronHelm(openstack.OpenstackBaseHelm):
hosts = self.dbapi.ihost_get_list()
for host in hosts:
host_labels = self.dbapi.label_get_by_host(host.id)
if (host.invprovision in [constants.PROVISIONED,
constants.PROVISIONING]):
if constants.WORKER in utils.get_personalities(host):
constants.PROVISIONING] or
host.ihost_action in [constants.UNLOCK_ACTION,
constants.FORCE_UNLOCK_ACTION]):
if (constants.WORKER in utils.get_personalities(host) and
utils.has_openstack_compute(host_labels)):
hostname = str(host.hostname)
host_neutron = {


@@ -502,9 +502,13 @@ class NovaHelm(openstack.OpenstackBaseHelm):
hosts = self.dbapi.ihost_get_list()
for host in hosts:
host_labels = self.dbapi.label_get_by_host(host.id)
if (host.invprovision in [constants.PROVISIONED,
constants.PROVISIONING]):
if constants.WORKER in utils.get_personalities(host):
constants.PROVISIONING] or
host.ihost_action in [constants.UNLOCK_ACTION,
constants.FORCE_UNLOCK_ACTION]):
if (constants.WORKER in utils.get_personalities(host) and
utils.has_openstack_compute(host_labels)):
hostname = str(host.hostname)
default_config = {}


@@ -38,6 +38,7 @@ REPORT_CEPH_SERVICES_CONFIG = 'ceph_services'
REPORT_CEPH_MONITOR_CONFIG = 'ceph_monitor'
REPORT_PCI_SRIOV_CONFIG = 'pci_sriov_config'
REPORT_CEPH_OSD_CONFIG = 'ceph_osd'
REPORT_CEPH_RADOSGW_CONFIG = 'ceph_radosgw'
def puppet_apply_manifest(ip_address, personality,


@@ -32,6 +32,7 @@ class FakeConductorAPI(object):
self.remove_host_config = mock.MagicMock()
self.delete_barbican_secret = mock.MagicMock()
self.iplatform_update_by_ihost = mock.MagicMock()
self.evaluate_app_reapply = mock.MagicMock()
def create_ihost(self, context, values):
# Create the host in the DB as the code under test expects this
@@ -680,6 +681,8 @@ class TestPatch(TestHost):
mock.ANY)
# Verify that the host was configured
self.fake_conductor_api.configure_ihost.assert_called_once()
# Verify that the app reapply was not evaluated
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was added to maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -712,6 +715,8 @@ class TestPatch(TestHost):
mock.ANY)
# Verify that the host was configured
self.fake_conductor_api.configure_ihost.assert_called_once()
# Verify that the app reapply was not evaluated
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -776,6 +781,8 @@ class TestPatch(TestHost):
mock.ANY)
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was not modified in maintenance
self.mock_mtce_api_host_modify.assert_not_called()
# Verify that the host action was cleared
@@ -866,6 +873,8 @@ class TestPatch(TestHost):
mock.ANY)
# Verify that the host was configured
self.fake_conductor_api.configure_ihost.assert_called_once()
# Verify that the app reapply was not evaluated
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was added to maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -916,6 +925,8 @@ class TestPatch(TestHost):
mock.ANY)
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was not modified in maintenance
self.mock_mtce_api_host_modify.assert_not_called()
# Verify that the host action was cleared
@@ -964,6 +975,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -1001,6 +1014,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -1036,6 +1051,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -1071,6 +1088,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -1107,6 +1126,8 @@ class TestPatch(TestHost):
mock.ANY, c1_host['uuid'])
# Verify that the reinstall was not sent to the VIM
self.mock_vim_api_host_action.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was configured
self.fake_conductor_api.configure_ihost.assert_called_once()
# Verify that the host was modified in maintenance
@@ -1144,6 +1165,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -1179,6 +1202,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host action was cleared
@@ -1215,6 +1240,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was not modified in maintenance
self.mock_mtce_api_host_modify.assert_not_called()
# Verify that the host was updated
@@ -1258,6 +1285,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was modified in maintenance
self.mock_mtce_api_host_modify.assert_called_once()
# Verify that the host was updated
@@ -1302,6 +1331,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was not modified in maintenance
self.mock_mtce_api_host_modify.assert_not_called()
# Verify that the host was not updated
@@ -1345,6 +1376,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was not modified in maintenance
self.mock_mtce_api_host_modify.assert_not_called()
# Verify that the host was not updated
@@ -1387,6 +1420,8 @@ class TestPatch(TestHost):
self.mock_vim_api_host_action.assert_not_called()
# Verify that the host was not configured
self.fake_conductor_api.configure_ihost.assert_not_called()
# Verify that the app reapply evaluate was not called
self.fake_conductor_api.evaluate_app_reapply.assert_not_called()
# Verify that the host was not modified in maintenance
self.mock_mtce_api_host_modify.assert_not_called()
# Verify that the host was not updated