From 615340ce6230b4f1831298f68241e27a939bf151 Mon Sep 17 00:00:00 2001 From: Eric MacDonald Date: Wed, 22 Apr 2020 07:59:27 -0400 Subject: [PATCH] Add orchestrated fw update support to vim This update adds firmware update orchestration support to the VIM for host device fpga images as described in the 'N3000 FPGA device image update orchestration' feature specification. See https://review.opendev.org/#/c/713302/ The firmware update orchestration strategy added to the VIM is modeled after the VIM's existing patch orchestration and includes strategy create/apply/show/abort/delete. The strategy can be managed through the existing sw-manager tool via the VIM's REST API. Only unlocked hosts with the worker function are included in the strategy. The strategy includes a stage for each host or group of hosts with ordered step sequences of firmware update followed by a lock and unlock of each updated host. Change-Id: I4eea7ca3f00782d27cdbb5d5615c6a255ac6966e Story: 2006740 Task: 39145 Signed-off-by: Eric MacDonald --- api-ref/source/api-ref-nfv-vim-v1.rst | 653 ++++++ nfv/centos/build_srpm.data | 2 +- .../nfv_client/openstack/sw_update.py | 5 +- nfv/nfv-client/nfv_client/shell.py | 140 +- .../nfv_client/sw_update/__init__.py | 3 +- .../nfv_client/sw_update/_sw_update.py | 5 +- .../alarm/objects/v1/_alarm_defs.py | 5 +- .../event_log/objects/v1/_event_log_defs.py | 13 +- .../nfv_plugins/alarm_handlers/fm.py | 8 +- .../nfv_plugins/event_log_handlers/fm.py | 24 +- .../nfvi_plugins/nfvi_infrastructure_api.py | 225 +- .../nfvi_plugins/openstack/sysinv.py | 80 +- .../vim_orchestration_test_cases.txt | 71 + .../tests/sw_update_testcase.py | 337 +++ .../tests/test_fw_update_strategy.py | 2076 +++++++++++++++++ .../nfv_unit_tests/tests/test_nfv_client.py | 17 + .../tests/test_sw_patch_strategy.py | 547 +---- .../tests/test_sw_upgrade_strategy.py | 415 +--- nfv/nfv-vim/nfv_vim/alarm/_sw_update.py | 37 +- .../v1/orchestration/_controller.py | 8 +- .../v1/orchestration/sw_update/__init__.py | 3 +- .../v1/orchestration/sw_update/_fw_update.py | 54 + .../sw_update/_sw_update_defs.py | 6 +- .../sw_update/_sw_update_strategy.py | 77 +- .../nfv_vim/database/_database_sw_update.py | 5 +- nfv/nfv-vim/nfv_vim/debug.ini | 3 +- .../nfv_vim/directors/_directors_defs.py | 4 +- .../nfv_vim/directors/_host_director.py | 140 +- .../nfv_vim/directors/_sw_mgmt_director.py | 50 +- .../events/_vim_sw_update_api_events.py | 17 +- nfv/nfv-vim/nfv_vim/nfvi/__init__.py | 6 +- .../nfvi/_nfvi_infrastructure_module.py | 43 +- .../nfv_vim/nfvi/objects/v1/__init__.py | 3 +- .../nfvi/objects/v1/_host_fw_update.py | 17 + nfv/nfv-vim/nfv_vim/objects/__init__.py | 3 +- nfv/nfv-vim/nfv_vim/objects/_fw_update.py | 187 ++ nfv/nfv-vim/nfv_vim/objects/_sw_update.py | 3 +- nfv/nfv-vim/nfv_vim/strategy/__init__.py | 6 +- nfv/nfv-vim/nfv_vim/strategy/_strategy.py | 382 ++- .../nfv_vim/strategy/_strategy_defs.py | 21 +- .../nfv_vim/strategy/_strategy_stages.py | 6 +- .../nfv_vim/strategy/_strategy_steps.py | 467 +++- 42 files changed, 5356 insertions(+), 818 deletions(-) create mode 100755 nfv/nfv-tests/nfv_unit_tests/tests/sw_update_testcase.py create mode 100755 nfv/nfv-tests/nfv_unit_tests/tests/test_fw_update_strategy.py create mode 100755 nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_fw_update.py create mode 100755 nfv/nfv-vim/nfv_vim/nfvi/objects/v1/_host_fw_update.py create mode 100644 nfv/nfv-vim/nfv_vim/objects/_fw_update.py diff --git a/api-ref/source/api-ref-nfv-vim-v1.rst b/api-ref/source/api-ref-nfv-vim-v1.rst index 
dc460a54..a4bfa1b3 100644 --- a/api-ref/source/api-ref-nfv-vim-v1.rst +++ b/api-ref/source/api-ref-nfv-vim-v1.rst @@ -114,6 +114,10 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404) { "href": "http://192.168.204.2:4545/orchestration/sw-upgrade/", "rel": "sw-upgrade" + }, + { + "href": "http://192.168.204.2:4545/orchestration/fw-update/", + "rel": "fw-update" } ] } @@ -186,6 +190,39 @@ forbidden (403), badMethod (405), overLimit (413), itemNotFound (404) This operation does not accept a request body. +********************************************************************** +Lists information about all NFV VIM API orchestration fw-update links +********************************************************************** + +.. rest_method:: GET /api/orchestration/fw-update + +**Normal response codes** + +200 + +**Error response codes** + +serviceUnavailable (503), badRequest (400), unauthorized (401), +forbidden (403), badMethod (405), overLimit (413), itemNotFound (404) + +:: + + { + "id": "fw-update", + "links": [ + { + "href": "http://192.168.204.2:4545/orchestration/fw-update/", + "rel": "self" + }, + { + "href": "http://192.168.204.2:4545/orchestration/fw-update/strategy/", + "rel": "strategy" + } + ] + } + +This operation does not accept a request body. + --------------- Patch Strategy --------------- @@ -2441,4 +2478,620 @@ forbidden (403), badMethod (405), overLimit (413) } } +------------------------ +Firmware Update Strategy +------------------------ + +Firmware update orchestration is done with a firmware update orchestration +strategy, or plan, for the automated update procedure which contains a number +of parameters for customizing the particular behavior of the firmware update +orchestration. + +*************************************************************** +Shows detailed information about the current fw-update strategy +*************************************************************** + +.. 
rest_method:: GET /api/orchestration/fw-update/strategy + +**Normal response codes** + +200 + +**Error response codes** + +serviceUnavailable (503), badRequest (400), unauthorized (401), +forbidden (403), badMethod (405), overLimit (413), itemNotFound (404) + +:: + + { + "strategy": { + "controller-apply-type": "ignore", + "swift-apply-type": "ignore", + "storage-apply-type": "ignore", + "worker-apply-type": "serial", + "state": "ready-to-apply", + "default-instance-action": "stop-start", + "max-parallel-worker-hosts": 2, + "alarm-restrictions": "strict", + "current-phase-completion-percentage": 100, + "uuid": "5dd16d94-dfc5-4029-bfcb-d815e7c2dc3d", + "name": "fw-update", + "current-phase": "build", + "build-phase": { + "phase-name": "build", + "current-stage": 1, + "total-stages": 1, + "completion-percentage": 100, + "start-date-time": "2020-05-05 21:07:18", + "end-date-time": "2020-05-05 21:07:19", + "stop-at-stage": 1, + "result": "success", + "timeout": 182, + "reason": "", + "inprogress": false, + "stages": [ + { + "stage-id": 0, + "total-steps": 3, + "stage-name": "fw-update-hosts-query", + "result": "success", + "timeout": 181, + "inprogress": false, + "start-date-time": "2020-05-05 21:07:18", + "end-date-time": "2020-05-05 21:07:19", + "reason": "", + "current-step" : 3, + "steps":[ + { + "step-id": 0, + "step-name": "query-alarms", + "entity-type": "", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "2020-05-05 21:07:18", + "end-date-time": "2020-05-05 21:07:19", + "timeout": 60, + "result": "success", + "reason": "" + }, + { + "step-id": 1, + "step-name": "query-host-devices", + "entity-type": "", + "entity-names": ["compute-1"], + "entity-uuids": ["ecff0928-9655-46ed-9ac0-433dfa21c7e2"], + "start-date-time": "2020-05-05 21:07:19", + "end-date-time": "2020-05-05 21:07:19", + "timeout": 60, + "result": "success", + "reason": "" + }, + { + "step-id": 2, + "step-name": "query-host-devices", + "entity-type": "", + "entity-names": ["compute-0"], + "entity-uuids": ["fa62c159-7b2c-47f5-bbda-126bc5e7de21"], + "start-date-time": "2020-05-05 21:07:19", + "end-date-time": "2020-05-05 21:07:19", + "timeout": 60, + "result": "success", + "reason": "" + } + ] + } + ] + }, + "apply-phase": { + "phase-name": "apply", + "current-stage": 0, + "completion-percentage": 100, + "total-stages": 2, + "stop-at-stage": 0, + "start-date-time": "", + "end-date-time": "", + "result": "initial", + "timeout": 0, + "reason": "", + "inprogress": false, + "stages": [ + { + "stage-id": 0, + "stage-name": "fw-update-worker-hosts", + "start-date-time": "", + "end-date-time": "", + "current-step": 0, + "result": "initial", + "timeout": 6436, + "inprogress": false, + "reason": "", + "total-steps": 6, + "steps": [ + { + "step-id": 0, + "step-name": "query-alarms", + "entity-type": "", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + }, + { + "step-id": 1, + "entity-type": "hosts", + "step-name": "fw-update-hosts", + "entity-names": ["compute-1"], + "entity-uuids": ["ecff0928-9655-46ed-9ac0-433dfa21c7e2"], + "start-date-time": "", + "end-date-time": "", + "timeout": 3600, + "result": "initial", + "reason": "" + }, + { + "step-id": 2, + "entity-type": "hosts", + "step-name": "lock-hosts", + "entity-names": ["compute-1"], + "entity-uuids": ["ecff0928-9655-46ed-9ac0-433dfa21c7e2"], + "start-date-time": "", + "end-date-time": "", + "timeout": 900, + "result": "initial", + "reason": "" + }, + { + "step-id": 3, 
+ "entity-type": "", + "step-name": "system-stabilize", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "", + "end-date-time": "", + "timeout": 15, + "result": "initial", + "reason": "" + }, + { + "step-id": 4, + "entity-type": "hosts", + "step-name": "unlock-hosts", + "entity-names": ["compute-1"], + "entity-uuids": ["ecff0928-9655-46ed-9ac0-433dfa21c7e2"], + "start-date-time": "", + "end-date-time": "", + "timeout": 1800, + "result": "initial", + "reason": "" + }, + { + "step-id": 5, + "entity-type": "", + "step-name": "system-stabilize", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + } + ], + }, + { + "stage-id": 1, + "total-steps": 6, + "stage-name": "fw-update-worker-hosts", + "inprogress": false, + "start-date-time": "", + "end-date-time": "", + "timeout": 6436, + "reason": "", + "result": "initial", + "current-step": 0, + "steps":[ + { + "step-id": 0, + "step-name": "query-alarms", + "entity-type": "", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + }, + { + "step-id":1, + "step-name": "fw-update-hosts", + "entity-type": "hosts", + "entity-names": ["compute-0"], + "entity-uuids": ["fa62c159-7b2c-47f5-bbda-126bc5e7de21"], + "start-date-time": "", + "end-date-time": "", + "timeout": 3600, + "result": "initial", + "reason": "" + }, + { + "step-id": 2, + "step-name": "lock-hosts", + "entity-type": "hosts", + "entity-names": ["compute-0"], + "entity-uuids": ["fa62c159-7b2c-47f5-bbda-126bc5e7de21"], + "start-date-time": "", + "end-date-time": "", + "timeout": 900, + "result": "initial", + "reason": "" + }, + { + "step-id": 3, + "step-name": "system-stabilize", + "entity-type": "", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "", + "end-date-time": "", + "timeout": 15, + "result": "initial", + "reason": "" + }, + { + "step-id": 4, + "step-name": "unlock-hosts", + "entity-type": "hosts", + "entity-names": ["compute-0"], + "entity-uuids": ["fa62c159-7b2c-47f5-bbda-126bc5e7de21"], + "start-date-time": "", + "end-date-time": "", + "timeout": 1800, + "result": "initial", + "reason": "" + }, + { + "step-id": 5, + "step-name": "system-stabilize", + "entity-type": "", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + } + ], + } + ], + }, + "abort-phase": { + "phase-name": "abort", + "total-stages": 0, + "completion-percentage": 100, + "start-date-time": "", + "end-date-time": "", + "stop-at-stage": 0, + "result": "initial", + "timeout": 0, + "reason": "", + "inprogress": false, + "stages": [], + "current-stage": 0 + } + } + } + +This operation does not accept a request body. + +**************************** +Creates a fw-update strategy +**************************** + +.. rest_method:: POST /api/orchestration/fw-update/strategy + +**Normal response codes** + +200 + +**Error response codes** + +serviceUnavailable (503), badRequest (400), unauthorized (401), +forbidden (403), badMethod (405), overLimit (413) + +**Request parameters** + +.. csv-table:: + :header: "Parameter", "Style", "Type", "Description" + :widths: 20, 20, 20, 60 + + "controller-apply-type", "plain", "xsd:string", "The apply type for controller hosts: ``ignore``." + "storage-apply-type", "plain", "xsd:string", "The apply type for storage hosts: ``ignore``." 
+ "worker-apply-type", "plain", "xsd:string", "The apply type for worker hosts: ``serial``, ``parallel`` or ``ignore``." + "max-parallel-worker-hosts (Optional)", "plain", "xsd:integer", "The maximum number of worker hosts to patch in parallel; only applicable if ``worker-apply-type = parallel``. Default value is ``2``." + "default-instance-action", "plain", "xsd:string", "The default instance action: ``stop-start`` or ``migrate``." + "alarm-restrictions (Optional)", "plain", "xsd:string", "The strictness of alarm checks: ``strict`` or ``relaxed``." + +:: + + { + "controller-apply-type": "ignore", + "storage-apply-type": "ignore", + "worker-apply-type": "serial", + "default-instance-action": "stop-start", + "alarm-restrictions": "strict", + } + +:: + + { + "strategy": { + "name": "fw-update", + "worker-apply-type": "serial", + "controller-apply-type": "ignore", + "swift-apply-type": "ignore", + "storage-apply-type": "ignore", + "current-phase-completion-percentage": 0, + "uuid": "447c4267-0ecb-48f4-9237-1d747a3e7cca", + "default-instance-action": "stop-start", + "max-parallel-worker-hosts": 2, + "alarm-restrictions": "strict", + "state": "building", + "build-phase": { + "phase-name": "build", + "current-stage": 0, + "start-date-time": "2020-05-06 13:26:11", + "end-date-time": "", + "completion-percentage": 0, + "stop-at-stage": 1, + "result": "inprogress", + "timeout": 182, + "reason": "", + "inprogress": true, + "total-stages": 1, + "stages": [ + { + "stage-id": 0, + "stage-name": "fw-update-hosts-query", + "total-steps": 3, + "inprogress": true, + "start-date-time": "2020-05-06 13:26:11", + "end-date-time": "", + "reason": "", + "current-step": 0, + "result": "inprogress", + "timeout": 181, + "steps": [ + { + "step-id": 0, + "step-name": "query-alarms", + "entity-type": "", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "2020-05-06 13:26:11", + "end-date-time": "", + "timeout": 60, + "result": "wait", + "reason": "" + }, + { + "step-id": 1, + "step-name": "query-host-devices", + "entity-type": "", + "entity-names": ["compute-1"], + "entity-uuids": ["ecff0928-9655-46ed-9ac0-433dfa21c7e2"], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + }, + { + "step-id": 2, + "step-name": "query-host-devices", + "entity-type": "", + "entity-names": ["compute-0"], + "entity-uuids": ["fa62c159-7b2c-47f5-bbda-126bc5e7de21"], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + } + ], + } + ], + }, + "apply-phase": { + "start-date-time": "", + "end-date-time": "", + "phase-name": "apply", + "completion-percentage": 100, + "total-stages": 0, + "stop-at-stage": 0, + "result": "initial", + "timeout": 0, + "reason": "", + "inprogress": false, + "stages": [], + "current-stage": 0 + }, + "abort-phase": { + "start-date-time": "", + "end-date-time": "", + "phase-name": "abort", + "completion-percentage": 100, + "total-stages": 0, + "stop-at-stage": 0, + "result": "initial", + "timeout": 0, + "reason": "", + "inprogress":false, + "stages": [], + "current-stage": 0 + } + } + } + +************************************** +Deletes the current fw-update strategy +************************************** + +.. rest_method:: DELETE /api/orchestration/fw-update/strategy + +**Normal response codes** + +204 + +:: + + { + } + +************************************** +Applies or aborts a fw-update strategy +************************************** + +.. 
rest_method:: POST /api/orchestration/fw-update/strategy/actions + +**Normal response codes** + +202 + +**Error response codes** + +serviceUnavailable (503), badRequest (400), unauthorized (401), +forbidden (403), badMethod (405), overLimit (413) + +**Request parameters** + +.. csv-table:: + :header: "Parameter", "Style", "Type", "Description" + :widths: 20, 20, 20, 60 + + "action", "plain", "xsd:string", "The action to take: ``apply-all``, ``apply-stage``, ``abort`` or ``abort-stage``." + "stage-id (Optional)", "plain", "xsd:string", "The stage-id to apply or abort. Only used with ``apply-stage`` or ``abort-stage`` actions." + +:: + + { + "action": "apply-all" + } + +:: + + { + "strategy":{ + "controller-apply-type": "ignore", + "swift-apply-type": "ignore", + "current-phase-completion-percentage": 0, + "uuid": "447c4267-0ecb-48f4-9237-1d747a3e7cca", + "name": "fw-update", + "current-phase": "build", + "storage-apply-type": "ignore", + "state":"building", + "worker-apply-type": "serial", + "default-instance-action": "stop-start", + "max-parallel-worker-hosts": 2, + "alarm-restrictions": "strict", + "build-phase": { + "phase-name": "build", + "current-stage": 0, + "start-date-time": "2020-05-06 13:26:11", + "end-date-time": "", + "completion-percentage": 0, + "stop-at-stage": 1, + "result": "inprogress", + "timeout": 182, + "reason": "", + "inprogress": true, + "total-stages": 1, + "stages": [ + { + "stage-id": 0, + "stage-name": "fw-update-hosts-query", + "total-steps": 3, + "inprogress": true, + "start-date-time": "2020-05-06 13:26:11", + "end-date-time": "", + "reason": "", + "current-step": 0, + "result": "inprogress", + "timeout": 181, + "steps": [ + { + "step-id": 0, + "step-name": "query-alarms", + "entity-type": "", + "entity-names": [], + "entity-uuids": [], + "start-date-time": "2020-05-06 13:26:11", + "end-date-time": "", + "timeout": 60, + "result": "wait", + "reason": "" + }, + { + "step-id": 1, + "step-name": "query-host-devices", + "entity-type": "", + "entity-names": ["compute-1"], + "entity-uuids": ["ecff0928-9655-46ed-9ac0-433dfa21c7e2"], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + }, + { + "step-id": 2, + "step-name": "query-host-devices", + "entity-type": "", + "entity-names": ["compute-0"], + "entity-uuids": ["fa62c159-7b2c-47f5-bbda-126bc5e7de21"], + "start-date-time": "", + "end-date-time": "", + "timeout": 60, + "result": "initial", + "reason": "" + } + ] + } + ] + }, + "apply-phase": { + "start-date-time": "", + "end-date-time": "", + "phase-name": "apply", + "completion-percentage": 100, + "total-stages": 0, + "stop-at-stage": 0, + "result": "initial", + "timeout": 0, + "reason": "", + "inprogress": false, + "stages": [], + "current-stage": 0 + }, + "abort-phase": { + "start-date-time": "", + "end-date-time": "", + "phase-name": "abort", + "completion-percentage": 100, + "total-stages": 0, + "stop-at-stage": 0, + "result": "initial", + "timeout": 0, + "reason": "", + "inprogress": false, + "stages": [], + "current-stage": 0 + } + } + } diff --git a/nfv/centos/build_srpm.data b/nfv/centos/build_srpm.data index 3e3e543a..a80e9d02 100755 --- a/nfv/centos/build_srpm.data +++ b/nfv/centos/build_srpm.data @@ -1 +1 @@ -TIS_PATCH_VER=77 +TIS_PATCH_VER=78 diff --git a/nfv/nfv-client/nfv_client/openstack/sw_update.py b/nfv/nfv-client/nfv_client/openstack/sw_update.py index b669f76f..fa3f0d91 100755 --- a/nfv/nfv-client/nfv_client/openstack/sw_update.py +++ b/nfv/nfv-client/nfv_client/openstack/sw_update.py @@ -1,5 
+1,5 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016,2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -228,6 +228,9 @@ def create_strategy(token_id, url, strategy_name, controller_apply_type, api_cmd_payload['controller-apply-type'] = controller_apply_type api_cmd_payload['swift-apply-type'] = swift_apply_type api_cmd_payload['default-instance-action'] = default_instance_action + elif 'fw-update' == strategy_name: + api_cmd_payload['controller-apply-type'] = controller_apply_type + api_cmd_payload['default-instance-action'] = default_instance_action elif 'sw-upgrade' == strategy_name: if 'start_upgrade' in kwargs and kwargs['start_upgrade']: api_cmd_payload['start-upgrade'] = True diff --git a/nfv/nfv-client/nfv_client/shell.py b/nfv/nfv-client/nfv_client/shell.py index 8553e118..5d44d81c 100755 --- a/nfv/nfv-client/nfv_client/shell.py +++ b/nfv/nfv-client/nfv_client/shell.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016,2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -155,6 +155,73 @@ def process_main(argv=sys.argv[1:]): # pylint: disable=dangerous-default-value sw_upgrade_show_strategy_cmd.add_argument( '--details', action='store_true', help='show strategy details') + # Firmware Update Commands + fw_update_parser = commands.add_parser('fw-update-strategy', + help='Firmware Update Strategy') + fw_update_parser.set_defaults(cmd_area='fw-update-strategy') + + fw_update_cmds = fw_update_parser.add_subparsers( + title='Firmware Update Commands', metavar='') + fw_update_cmds.required = True + + fw_update_create_strategy_cmd \ + = fw_update_cmds.add_parser('create', help='Create a strategy') + fw_update_create_strategy_cmd.set_defaults(cmd='create') + fw_update_create_strategy_cmd.add_argument('--controller-apply-type', + default=sw_update.APPLY_TYPE_IGNORE, + choices=[sw_update.APPLY_TYPE_IGNORE], + help='defaults to ignore') + fw_update_create_strategy_cmd.add_argument('--storage-apply-type', + default=sw_update.APPLY_TYPE_IGNORE, + choices=[sw_update.APPLY_TYPE_IGNORE], + help='defaults to ignore') + fw_update_create_strategy_cmd.add_argument('--worker-apply-type', + default=sw_update.APPLY_TYPE_SERIAL, + choices=[sw_update.APPLY_TYPE_SERIAL, + sw_update.APPLY_TYPE_PARALLEL, + sw_update.APPLY_TYPE_IGNORE], + help='defaults to serial') + + fw_update_create_strategy_cmd.add_argument( + '--max-parallel-worker-hosts', type=int, choices=range(2, 11), + help='maximum worker hosts to update in parallel') + + fw_update_create_strategy_cmd.add_argument('--instance-action', + default=sw_update.INSTANCE_ACTION_STOP_START, + choices=[sw_update.INSTANCE_ACTION_MIGRATE, + sw_update.INSTANCE_ACTION_STOP_START], + help='defaults to stop-start') + + fw_update_create_strategy_cmd.add_argument('--alarm-restrictions', + default=sw_update.ALARM_RESTRICTIONS_STRICT, + choices=[sw_update.ALARM_RESTRICTIONS_STRICT, + sw_update.ALARM_RESTRICTIONS_RELAXED], + help='defaults to strict') + + fw_update_delete_strategy_cmd \ + = fw_update_cmds.add_parser('delete', help='Delete a strategy') + fw_update_delete_strategy_cmd.set_defaults(cmd='delete') + fw_update_delete_strategy_cmd.add_argument( + '--force', action='store_true', help=argparse.SUPPRESS) + + fw_update_apply_strategy_cmd \ + = fw_update_cmds.add_parser('apply', help='Apply a strategy') + fw_update_apply_strategy_cmd.set_defaults(cmd='apply') + fw_update_apply_strategy_cmd.add_argument( + '--stage-id', default=None, help='stage identifier to 
apply') + + fw_update_abort_strategy_cmd \ + = fw_update_cmds.add_parser('abort', help='Abort a strategy') + fw_update_abort_strategy_cmd.set_defaults(cmd='abort') + fw_update_abort_strategy_cmd.add_argument( + '--stage-id', help='stage identifier to abort') + + fw_update_show_strategy_cmd \ + = fw_update_cmds.add_parser('show', help='Show a strategy') + fw_update_show_strategy_cmd.set_defaults(cmd='show') + fw_update_show_strategy_cmd.add_argument( + '--details', action='store_true', help='show strategy details') + args = parser.parse_args(argv) if args.debug: @@ -330,6 +397,77 @@ def process_main(argv=sys.argv[1:]): # pylint: disable=dangerous-default-value else: raise ValueError("Unknown command, %s, given for upgrade-strategy" % args.cmd) + elif 'fw-update-strategy' == args.cmd_area: + if 'create' == args.cmd: + sw_update.create_strategy( + args.os_auth_url, + args.os_project_name, + args.os_project_domain_name, + args.os_username, + args.os_password, + args.os_user_domain_name, + args.os_region_name, + args.os_interface, + sw_update.STRATEGY_NAME_FW_UPDATE, + args.controller_apply_type, + args.storage_apply_type, + sw_update.APPLY_TYPE_IGNORE, + args.worker_apply_type, + args.max_parallel_worker_hosts, + args.instance_action, + args.alarm_restrictions) + + elif 'delete' == args.cmd: + sw_update.delete_strategy(args.os_auth_url, + args.os_project_name, + args.os_project_domain_name, + args.os_username, + args.os_password, + args.os_user_domain_name, + args.os_region_name, + args.os_interface, + sw_update.STRATEGY_NAME_FW_UPDATE, + args.force) + + elif 'apply' == args.cmd: + sw_update.apply_strategy(args.os_auth_url, + args.os_project_name, + args.os_project_domain_name, + args.os_username, + args.os_password, + args.os_user_domain_name, + args.os_region_name, + args.os_interface, + sw_update.STRATEGY_NAME_FW_UPDATE, + args.stage_id) + + elif 'abort' == args.cmd: + sw_update.abort_strategy(args.os_auth_url, + args.os_project_name, + args.os_project_domain_name, + args.os_username, + args.os_password, + args.os_user_domain_name, + args.os_region_name, + args.os_interface, + sw_update.STRATEGY_NAME_FW_UPDATE, + args.stage_id) + + elif 'show' == args.cmd: + sw_update.show_strategy(args.os_auth_url, + args.os_project_name, + args.os_project_domain_name, + args.os_username, + args.os_password, + args.os_user_domain_name, + args.os_region_name, + args.os_interface, + sw_update.STRATEGY_NAME_FW_UPDATE, + args.details) + else: + raise ValueError("Unknown command, %s, " + "given for fw-update-strategy" + % args.cmd) else: raise ValueError("Unknown command area, %s, given" % args.cmd_area) diff --git a/nfv/nfv-client/nfv_client/sw_update/__init__.py b/nfv/nfv-client/nfv_client/sw_update/__init__.py index f0f02e88..1006504c 100755 --- a/nfv/nfv-client/nfv_client/sw_update/__init__.py +++ b/nfv/nfv-client/nfv_client/sw_update/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016, 2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -14,5 +14,6 @@ from nfv_client.sw_update._sw_update import delete_strategy # noqa: F401 from nfv_client.sw_update._sw_update import INSTANCE_ACTION_MIGRATE # noqa: F401 from nfv_client.sw_update._sw_update import INSTANCE_ACTION_STOP_START # noqa: F401 from nfv_client.sw_update._sw_update import show_strategy # noqa: F401 +from nfv_client.sw_update._sw_update import STRATEGY_NAME_FW_UPDATE # noqa: F401 from nfv_client.sw_update._sw_update import STRATEGY_NAME_SW_PATCH # noqa: F401 from nfv_client.sw_update._sw_update import STRATEGY_NAME_SW_UPGRADE # noqa: F401 diff --git a/nfv/nfv-client/nfv_client/sw_update/_sw_update.py b/nfv/nfv-client/nfv_client/sw_update/_sw_update.py index 25f7cebe..459e958a 100755 --- a/nfv/nfv-client/nfv_client/sw_update/_sw_update.py +++ b/nfv/nfv-client/nfv_client/sw_update/_sw_update.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016, 2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -8,6 +8,7 @@ from nfv_client.openstack import sw_update STRATEGY_NAME_SW_PATCH = 'sw-patch' STRATEGY_NAME_SW_UPGRADE = 'sw-upgrade' +STRATEGY_NAME_FW_UPDATE = 'fw-update' APPLY_TYPE_SERIAL = 'serial' APPLY_TYPE_PARALLEL = 'parallel' @@ -105,6 +106,8 @@ def _display_strategy(strategy, details=False): print("Strategy Patch Strategy:") elif strategy.name == STRATEGY_NAME_SW_UPGRADE: print("Strategy Upgrade Strategy:") + elif strategy.name == STRATEGY_NAME_FW_UPDATE: + print("Strategy Firmware Update Strategy:") else: print("Strategy Unknown Strategy:") diff --git a/nfv/nfv-common/nfv_common/alarm/objects/v1/_alarm_defs.py b/nfv/nfv-common/nfv_common/alarm/objects/v1/_alarm_defs.py index 118a2a0e..d2da9615 100755 --- a/nfv/nfv-common/nfv_common/alarm/objects/v1/_alarm_defs.py +++ b/nfv/nfv-common/nfv_common/alarm/objects/v1/_alarm_defs.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -41,6 +41,9 @@ class _AlarmType(Constants): SW_UPGRADE_AUTO_APPLY_INPROGRESS = Constant('sw-upgrade-auto-apply-inprogress') SW_UPGRADE_AUTO_APPLY_ABORTING = Constant('sw-upgrade-auto-apply-aborting') SW_UPGRADE_AUTO_APPLY_FAILED = Constant('sw-upgrade-auto-apply-failed') + FW_UPDATE_AUTO_APPLY_INPROGRESS = Constant('fw-update-auto-apply-inprogress') + FW_UPDATE_AUTO_APPLY_ABORTING = Constant('fw-update-auto-apply-aborting') + FW_UPDATE_AUTO_APPLY_FAILED = Constant('fw-update-auto-apply-failed') @six.add_metaclass(Singleton) diff --git a/nfv/nfv-common/nfv_common/event_log/objects/v1/_event_log_defs.py b/nfv/nfv-common/nfv_common/event_log/objects/v1/_event_log_defs.py index 63509f28..e40196df 100755 --- a/nfv/nfv-common/nfv_common/event_log/objects/v1/_event_log_defs.py +++ b/nfv/nfv-common/nfv_common/event_log/objects/v1/_event_log_defs.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -160,6 +160,17 @@ class _EventId(Constants): SW_UPGRADE_AUTO_APPLY_ABORT_REJECTED = Constant('sw-upgrade-auto-apply-abort-rejected') SW_UPGRADE_AUTO_APPLY_ABORT_FAILED = Constant('sw-upgrade-auto-apply-abort-failed') SW_UPGRADE_AUTO_APPLY_ABORTED = Constant('sw-upgrade-auto-apply-aborted') + FW_UPDATE_AUTO_APPLY_START = Constant('fw-update-auto-apply-started') + FW_UPDATE_AUTO_APPLY_INPROGRESS = Constant('fw-update-auto-apply-inprogress') + FW_UPDATE_AUTO_APPLY_REJECTED = Constant('fw-update-auto-apply-rejected') + FW_UPDATE_AUTO_APPLY_CANCELLED = Constant('fw-update-auto-apply-cancelled') + FW_UPDATE_AUTO_APPLY_FAILED = Constant('fw-update-auto-apply-failed') + FW_UPDATE_AUTO_APPLY_COMPLETED = Constant('fw-update-auto-apply-completed') + FW_UPDATE_AUTO_APPLY_ABORT = Constant('fw-update-auto-apply-abort') + FW_UPDATE_AUTO_APPLY_ABORTING = Constant('fw-update-auto-apply-aborting') + FW_UPDATE_AUTO_APPLY_ABORT_REJECTED = Constant('fw-update-auto-apply-abort-rejected') + FW_UPDATE_AUTO_APPLY_ABORT_FAILED = Constant('fw-update-auto-apply-abort-failed') + FW_UPDATE_AUTO_APPLY_ABORTED = Constant('fw-update-auto-apply-aborted') @six.add_metaclass(Singleton) diff --git a/nfv/nfv-plugins/nfv_plugins/alarm_handlers/fm.py b/nfv/nfv-plugins/nfv_plugins/alarm_handlers/fm.py index c268d56e..3d84c7bb 100755 --- a/nfv/nfv-plugins/nfv_plugins/alarm_handlers/fm.py +++ b/nfv/nfv-plugins/nfv_plugins/alarm_handlers/fm.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -72,6 +72,12 @@ _fm_alarm_id_mapping = dict([ fm_constants.FM_ALARM_ID_SW_UPGRADE_AUTO_APPLY_ABORTING), (alarm_objects_v1.ALARM_TYPE.SW_UPGRADE_AUTO_APPLY_FAILED, fm_constants.FM_ALARM_ID_SW_UPGRADE_AUTO_APPLY_FAILED), + (alarm_objects_v1.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_INPROGRESS, + fm_constants.FM_ALARM_ID_FW_UPDATE_AUTO_APPLY_INPROGRESS), + (alarm_objects_v1.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_ABORTING, + fm_constants.FM_ALARM_ID_FW_UPDATE_AUTO_APPLY_ABORTING), + (alarm_objects_v1.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_FAILED, + fm_constants.FM_ALARM_ID_FW_UPDATE_AUTO_APPLY_FAILED), ]) _fm_alarm_type_mapping = dict([ diff --git a/nfv/nfv-plugins/nfv_plugins/event_log_handlers/fm.py b/nfv/nfv-plugins/nfv_plugins/event_log_handlers/fm.py index ae78a7f8..186f6d1c 100755 --- a/nfv/nfv-plugins/nfv_plugins/event_log_handlers/fm.py +++ b/nfv/nfv-plugins/nfv_plugins/event_log_handlers/fm.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2016,2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -312,6 +312,28 @@ _fm_event_id_mapping = dict([ fm_constants.FM_LOG_ID_SW_UPGRADE_AUTO_APPLY_ABORT_FAILED), (event_log_objects_v1.EVENT_ID.SW_UPGRADE_AUTO_APPLY_ABORTED, fm_constants.FM_LOG_ID_SW_UPGRADE_AUTO_APPLY_ABORTED), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_START, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_START), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_INPROGRESS, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_INPROGRESS), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_REJECTED, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_REJECTED), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_CANCELLED, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_CANCELLED), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_FAILED, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_FAILED), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_COMPLETED, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_COMPLETED), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORT, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_ABORT), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORTING, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_ABORTING), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORT_REJECTED, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_ABORT_REJECTED), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORT_FAILED, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_ABORT_FAILED), + (event_log_objects_v1.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORTED, + fm_constants.FM_LOG_ID_FW_UPDATE_AUTO_APPLY_ABORTED), ]) _fm_event_type_mapping = dict([ diff --git a/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/nfvi_infrastructure_api.py b/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/nfvi_infrastructure_api.py index ad24b3d9..686157c2 100755 --- a/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/nfvi_infrastructure_api.py +++ b/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/nfvi_infrastructure_api.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2018 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -610,6 +610,229 @@ class NFVIInfrastructureAPI(nfvi.api.v1.NFVIInfrastructureAPI): callback.send(response) callback.close() + def get_host_devices(self, future, host_uuid, host_name, callback): + """ + Get host device list details + """ + response = dict() + response['completed'] = False + response['reason'] = '' + + try: + future.set_timeouts(config.CONF.get('nfvi-timeouts', None)) + + if self._platform_token is None or \ + self._platform_token.is_expired(): + future.work(openstack.get_token, self._platform_directory) + future.result = (yield) + + if not future.result.is_complete() or \ + future.result.data is None: + DLOG.error("OpenStack get-token did not complete, " + "host_uuid=%s." % host_uuid) + return + + self._platform_token = future.result.data + + future.work(sysinv.get_host_devices, + self._platform_token, host_uuid) + future.result = (yield) + + if not future.result.is_complete(): + return + + host_data = future.result.data + + response['result-data'] = host_data + response['completed'] = True + + except exceptions.OpenStackRestAPIException as e: + if httplib.UNAUTHORIZED == e.http_status_code: + response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED + if self._platform_token is not None: + self._platform_token.set_expired() + + else: + DLOG.exception("Caught exception trying to get_host_devices" + "details, host=%s, error=%s." 
% (host_name, e)) + + except Exception as e: + DLOG.exception("Caught exception trying to get_host_devices " + "details, host=%s, error=%s." % (host_name, e)) + + finally: + callback.send(response) + callback.close() + + def get_host_device(self, future, host_uuid, host_name, + device_uuid, device_name, callback): + """ + Get host device details + """ + response = dict() + response['completed'] = False + response['reason'] = '' + + try: + future.set_timeouts(config.CONF.get('nfvi-timeouts', None)) + + if self._platform_token is None or \ + self._platform_token.is_expired(): + future.work(openstack.get_token, self._platform_directory) + future.result = (yield) + + if not future.result.is_complete() or \ + future.result.data is None: + DLOG.error("OpenStack get-token did not complete, " + "host_uuid=%s." % host_uuid) + return + + self._platform_token = future.result.data + + future.work(sysinv.get_host_device, + self._platform_token, device_uuid) + future.result = (yield) + + if not future.result.is_complete(): + return + + host_data = future.result.data + + response['result-data'] = host_data + response['completed'] = True + + except exceptions.OpenStackRestAPIException as e: + if httplib.UNAUTHORIZED == e.http_status_code: + response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED + if self._platform_token is not None: + self._platform_token.set_expired() + + else: + DLOG.exception("Caught exception trying to get_host_device " + "details, host=%s, device=%s, error=%s." % + (host_name, device_name, e)) + + except Exception as e: + DLOG.exception("Caught exception trying to get_host_device " + "details, host=%s, device=%s, error=%s." % + (host_name, device_name, e)) + + finally: + callback.send(response) + callback.close() + + def host_device_image_update(self, future, host_uuid, host_name, callback): + """ + Update a host device image + """ + response = dict() + response['completed'] = False + response['reason'] = '' + + try: + future.set_timeouts(config.CONF.get('nfvi-timeouts', None)) + + if self._platform_token is None or \ + self._platform_token.is_expired(): + future.work(openstack.get_token, self._platform_directory) + future.result = (yield) + + if not future.result.is_complete() or \ + future.result.data is None: + DLOG.error("OpenStack get-token did not complete, " + "host_uuid=%s." % host_uuid) + return + + self._platform_token = future.result.data + + future.work(sysinv.host_device_image_update, + self._platform_token, host_uuid) + future.result = (yield) + + if not future.result.is_complete(): + return + + host_data = future.result.data + + response['result-data'] = host_data + response['completed'] = True + + except exceptions.OpenStackRestAPIException as e: + if httplib.UNAUTHORIZED == e.http_status_code: + response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED + if self._platform_token is not None: + self._platform_token.set_expired() + + else: + DLOG.exception("Caught exception requesting a host device " + "image update, host=%s, error=%s." % + (host_name, e)) + + except Exception as e: + DLOG.exception("Caught exception requesting a host device " + "image update, host=%s, error=%s." 
% + (host_name, e)) + + finally: + callback.send(response) + callback.close() + + def host_device_image_update_abort(self, future, host_uuid, host_name, callback): + """ + Abort a host device image update + """ + response = dict() + response['completed'] = False + response['reason'] = '' + + try: + future.set_timeouts(config.CONF.get('nfvi-timeouts', None)) + + if self._platform_token is None or \ + self._platform_token.is_expired(): + future.work(openstack.get_token, self._platform_directory) + future.result = (yield) + + if not future.result.is_complete() or \ + future.result.data is None: + DLOG.error("OpenStack get-token did not complete, " + "host_uuid=%s." % host_uuid) + return + + self._platform_token = future.result.data + + future.work(sysinv.host_device_image_update_abort, + self._platform_token, host_uuid) + future.result = (yield) + + if not future.result.is_complete(): + return + + host_data = future.result.data + + response['result-data'] = host_data + response['completed'] = True + + except exceptions.OpenStackRestAPIException as e: + if httplib.UNAUTHORIZED == e.http_status_code: + response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED + if self._platform_token is not None: + self._platform_token.set_expired() + + else: + DLOG.exception("Caught exception requesting host device " + "image update abort, host=%s, error=%s." % + (host_name, e)) + + except Exception as e: + DLOG.exception("Caught exception requesting host device " + "image update abort, host=%s, error=%s." % + (host_name, e)) + + finally: + callback.send(response) + callback.close() + def get_upgrade(self, future, callback): """ Get information about the upgrade from the plugin diff --git a/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/openstack/sysinv.py b/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/openstack/sysinv.py index 0894b182..b95d0f08 100755 --- a/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/openstack/sysinv.py +++ b/nfv/nfv-plugins/nfv_plugins/nfvi_plugins/openstack/sysinv.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2018 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -476,3 +476,81 @@ def swact_from_host(token, host_uuid): response = rest_api_request(token, "PATCH", api_cmd, api_cmd_headers, json.dumps(api_cmd_payload)) return response + + +def get_host_devices(token, host_uuid): + """ + Asks System Inventory for host device details + """ + url = token.get_service_url(PLATFORM_SERVICE.SYSINV) + if url is None: + raise ValueError("OpenStack SysInv URL is invalid") + + api_cmd = url + "/ihosts/%s/pci_devices" % host_uuid + + api_cmd_headers = dict() + api_cmd_headers['Content-Type'] = "application/json" + api_cmd_headers['User-Agent'] = "vim/1.0" + + response = rest_api_request(token, "GET", api_cmd, api_cmd_headers) + return response + + +def get_host_device(token, device_uuid): + """ + Asks System Inventory for host details for specific device + """ + url = token.get_service_url(PLATFORM_SERVICE.SYSINV) + if url is None: + raise ValueError("OpenStack SysInv URL is invalid") + + api_cmd = url + "/pci_devices/%s" % device_uuid + + api_cmd_headers = dict() + api_cmd_headers['Content-Type'] = "application/json" + api_cmd_headers['User-Agent'] = "vim/1.0" + + response = rest_api_request(token, "GET", api_cmd, api_cmd_headers) + return response + + +def host_device_image_update(token, host_uuid): + """ + Asks System Inventory to start a host device image update + """ + url = token.get_service_url(PLATFORM_SERVICE.SYSINV) + if url is None: + raise ValueError("OpenStack SysInv URL is invalid") + + api_cmd = url + "/ihosts/%s/device_image_update" % host_uuid + + api_cmd_headers = dict() + api_cmd_headers['Content-Type'] = "application/json" + api_cmd_headers['User-Agent'] = "vim/1.0" + + api_cmd_payload = dict() + + response = rest_api_request(token, "POST", api_cmd, api_cmd_headers, + json.dumps(api_cmd_payload)) + return response + + +def host_device_image_update_abort(token, host_uuid): + """ + Asks System Inventory to abort a host device image update + """ + url = token.get_service_url(PLATFORM_SERVICE.SYSINV) + if url is None: + raise ValueError("OpenStack SysInv URL is invalid") + + api_cmd = url + "/ihosts/%s/device_image_update_abort" % host_uuid + + api_cmd_headers = dict() + api_cmd_headers['Content-Type'] = "application/json" + api_cmd_headers['User-Agent'] = "vim/1.0" + + api_cmd_payload = dict() + + response = rest_api_request(token, "POST", api_cmd, api_cmd_headers, + json.dumps(api_cmd_payload)) + return response diff --git a/nfv/nfv-tests/nfv_api_tests/vim_orchestration_test_cases.txt b/nfv/nfv-tests/nfv_api_tests/vim_orchestration_test_cases.txt index 045f1ef4..7580b8bf 100644 --- a/nfv/nfv-tests/nfv_api_tests/vim_orchestration_test_cases.txt +++ b/nfv/nfv-tests/nfv_api_tests/vim_orchestration_test_cases.txt @@ -126,3 +126,74 @@ cat > delete.txt << EOF {} EOF curl -i -X DELETE -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/sw-upgrade/strategy -d @delete.txt + +Firmware Update Orchestration +============================= + +TENANT_ID=`openstack token issue | grep "| project_id |" | cut -f3 -d'|' | tr -d '[[:space:]]'` +TOKEN_ID=`openstack token issue | grep "| id |" | cut -f3 -d'|' | tr -d '[[:space:]]'` + +Create strategy +--------------- +cat > create_serial.txt << EOF +{ + "controller-apply-type": "ignore", + "default-instance-action": "stop-start", + "worker-apply-type": "serial", + "storage-apply-type": "ignore", + "alarm-restrictions": "relaxed" +} +EOF +curl -i -X POST -H "Accept: application/json" -H 
"X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.1:4545/api/orchestration/fw-update/strategy -d @create_serial.txt + +cat > create_parallel.txt << EOF +{ + "controller-apply-type": "ignore", + "default-instance-action": "migrate", + "worker-apply-type": "parallel", + "max-parallel-worker-hosts": "3", + "storage-apply-type": "ignore", + "alarm-restrictions": "relaxed" +} +EOF +curl -i -X POST -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/fw-update/strategy -d @create_parallel.txt + +Show strategy +------------- +curl -i -X GET -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/fw-update/strategy + +Abort strategy +-------------- +cat > abort.txt << EOF +{"action": "abort"} +EOF +curl -i -X POST -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/fw-update/strategy/actions -d @abort.txt + +Abort strategy stage +-------------------- +cat > abort_stage.txt << EOF +{"action": "abort-stage", "stage-id": "3"} +EOF +curl -i -X POST -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/fw-update/strategy/actions -d @abort_stage.txt + +Apply strategy +-------------- +cat > apply.txt << EOF +{"action": "apply-all"} +EOF +curl -i -X POST -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/fw-update/strategy/actions -d @apply.txt + +Apply strategy stage +-------------------- +cat > apply_stage.txt << EOF +{"action": "apply-stage", "stage-id": "3"} +EOF +curl -i -X POST -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/fw-update/strategy/actions -d @apply_stage.txt + +Delete strategy +--------------- +cat > delete.txt << EOF +{} +EOF +curl -i -X DELETE -H "Accept: application/json" -H "X-Auth-Token: ${TOKEN_ID}" -H "Content-Type: application/json" http://192.168.204.2:4545/api/orchestration/fw-update/strategy -d @delete.txt + diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/sw_update_testcase.py b/nfv/nfv-tests/nfv_unit_tests/tests/sw_update_testcase.py new file mode 100755 index 00000000..6b8ab05f --- /dev/null +++ b/nfv/nfv-tests/nfv_unit_tests/tests/sw_update_testcase.py @@ -0,0 +1,337 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import fixtures + +import pprint +import uuid + +from nfv_vim import host_fsm +from nfv_vim import nfvi +from nfv_vim import objects + +from nfv_vim.strategy._strategy import strategy_rebuild_from_dict +from nfv_vim.tables._host_aggregate_table import HostAggregateTable +from nfv_vim.tables._host_group_table import HostGroupTable +from nfv_vim.tables._host_table import HostTable +from nfv_vim.tables._instance_group_table import InstanceGroupTable +from nfv_vim.tables._instance_table import InstanceTable +from nfv_vim.tables._table import Table + +from . import testcase # noqa: H304 +from . import utils # noqa: H304 + +from nfv_vim.objects import HOST_PERSONALITY + +DEBUG_PRINTING = False + + +def validate_strategy_persists(strategy): + """ + Validate that the strategy can be converted to a dict and back without any + loss of data. 
+ Note: This is not foolproof - it won't catch cases where the an object + attribute was missed from both the as_dict and from_dict methods. + """ + strategy_dict = strategy.as_dict() + new_strategy = strategy_rebuild_from_dict(strategy_dict) + + if DEBUG_PRINTING: + if strategy.as_dict() != new_strategy.as_dict(): + print("==================== Strategy ====================") + pprint.pprint(strategy.as_dict()) + print("============== Converted Strategy ================") + pprint.pprint(new_strategy.as_dict()) + assert strategy.as_dict() == new_strategy.as_dict(), \ + "Strategy changed when converting to/from dict" + + +def validate_phase(phase, expected_results): + """ + Validate that the phase matches everything contained in expected_results + Note: there is probably a super generic, pythonic way to do this, but this + is good enough (tm). + """ + if DEBUG_PRINTING: + print("====================== Phase Results ========================") + pprint.pprint(phase) + print("===================== Expected Results ======================") + pprint.pprint(expected_results) + + for key in expected_results: + if key == 'stages': + stage_number = 0 + for stage in expected_results[key]: + apply_stage = phase[key][stage_number] + for stages_key in stage: + if stages_key == 'steps': + step_number = 0 + for step in stage[stages_key]: + apply_step = apply_stage[stages_key][step_number] + for step_key in step: + assert apply_step[step_key] == step[step_key], \ + "for [%s][%d][%s][%d][%s] found: %s but expected: %s" % \ + (key, stage_number, stages_key, + step_number, step_key, + apply_step[step_key], step[step_key]) + step_number += 1 + else: + assert apply_stage[stages_key] == stage[stages_key], \ + "for [%s][%d][%s] found: %s but expected: %s" % \ + (key, stage_number, stages_key, + apply_stage[stages_key], stage[stages_key]) + stage_number += 1 + else: + assert phase[key] == expected_results[key], \ + "for [%s] found: %s but expected: %s" % \ + (key, phase[key], expected_results[key]) + + +def fake_save(a): + pass + + +def fake_timer(a, b, c, d): + return 1234 + + +def fake_host_name(): + return 'controller-0' + + +def fake_host_name_controller_1(): + return 'controller-1' + + +def fake_host_name_controller_0(): + return 'controller-0' + + +def fake_callback(): + return + + +def fake_event_issue(a, b, c, d): + """ + Mock out the _event_issue function because it is being called when instance + objects are created. It ends up trying to communicate with another thread + (that doesn't exist) and this eventually leads to nosetests hanging if + enough events are issued. + """ + return None + + +def fake_nfvi_compute_plugin_disabled(): + return False + + +class SwUpdateStrategyTestCase(testcase.NFVTestCase): + + def setUp(self): + """ + Setup for testing. 
+ """ + super(SwUpdateStrategyTestCase, self).setUp() + self._tenant_table = Table() + self._instance_type_table = Table() + self._instance_table = InstanceTable() + self._instance_group_table = InstanceGroupTable() + self._host_table = HostTable() + self._host_group_table = HostGroupTable() + self._host_aggregate_table = HostAggregateTable() + + # Don't attempt to write to the database while unit testing + self._tenant_table.persist = False + self._instance_type_table.persist = False + self._instance_table.persist = False + self._instance_group_table.persist = False + self._host_table.persist = False + self._host_group_table.persist = False + self._host_aggregate_table.persist = False + + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._tenant_table._tenant_table', + self._tenant_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_table._host_table', + self._host_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_group_table._instance_group_table', + self._instance_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_group_table._host_group_table', + self._host_group_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', + self._host_aggregate_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_table._instance_table', + self._instance_table)) + self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_type_table._instance_type_table', + self._instance_type_table)) + + instance_type_uuid = str(uuid.uuid4()) + instance_type = objects.InstanceType(instance_type_uuid, 'small') + instance_type.update_details(vcpus=1, + mem_mb=64, + disk_gb=1, + ephemeral_gb=0, + swap_gb=0, + guest_services=None, + auto_recovery=True, + live_migration_timeout=800, + live_migration_max_downtime=500) + self._instance_type_table[instance_type_uuid] = instance_type + + def tearDown(self): + """ + Cleanup testing setup. 
+ """ + super(SwUpdateStrategyTestCase, self).tearDown() + self._tenant_table.clear() + self._instance_type_table.clear() + self._instance_table.clear() + self._instance_group_table.clear() + self._host_table.clear() + self._host_group_table.clear() + self._host_aggregate_table.clear() + + def create_instance(self, instance_type_name, instance_name, host_name, + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED): + """ + Create an instance + """ + tenant_uuid = str(uuid.uuid4()) + image_uuid = str(uuid.uuid4()) + + tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) + self._tenant_table[tenant_uuid] = tenant + + for instance_type in self._instance_type_table.values(): + if instance_type.name == instance_type_name: + instance_uuid = str(uuid.uuid4()) + + nfvi_instance = nfvi.objects.v1.Instance( + instance_uuid, instance_name, tenant_uuid, + admin_state=admin_state, + oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, + avail_status=list(), + action=nfvi.objects.v1.INSTANCE_ACTION.NONE, + host_name=host_name, + instance_type=utils.instance_type_to_flavor_dict( + instance_type), + image_uuid=image_uuid) + + instance = objects.Instance(nfvi_instance) + self._instance_table[instance.uuid] = instance + return + + assert 0, "Unknown instance_type_name: %s" % instance_type_name + + def create_instance_group(self, name, members, policies): + """ + Create an instance group + """ + member_uuids = [] + + for instance_uuid, instance in self._instance_table.items(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_instance_group = nfvi.objects.v1.InstanceGroup( + uuid=str(uuid.uuid4()), + name=name, + member_uuids=member_uuids, + policies=policies + ) + + instance_group = objects.InstanceGroup(nfvi_instance_group) + self._instance_group_table[instance_group.uuid] = instance_group + + def create_host(self, + host_name, + aio=False, + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED, + oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, + avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, + software_load='12.01', + target_load='12.01', + openstack_installed=True): + """ + Create a host + """ + personality = '' + + openstack_control = False + openstack_compute = False + + if host_name.startswith('controller'): + personality = HOST_PERSONALITY.CONTROLLER + if aio: + personality = personality + ',' + HOST_PERSONALITY.WORKER + if openstack_installed: + openstack_control = True + if aio: + openstack_compute = True + elif host_name.startswith('compute'): + personality = HOST_PERSONALITY.WORKER + if openstack_installed: + openstack_compute = True + elif host_name.startswith('storage'): + personality = HOST_PERSONALITY.STORAGE + else: + assert 0, "Invalid host_name: %s" % host_name + + nfvi_host = nfvi.objects.v1.Host( + uuid=str(uuid.uuid4()), + name=host_name, + personality=personality, + admin_state=admin_state, + oper_state=oper_state, + avail_status=avail_status, + action=nfvi.objects.v1.HOST_ACTION.NONE, + software_load=software_load, + target_load=target_load, + openstack_compute=openstack_compute, + openstack_control=openstack_control, + remote_storage=False, + uptime='1000' + ) + + if admin_state == nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED: + host = objects.Host(nfvi_host, + initial_state=host_fsm.HOST_STATE.ENABLED) + else: + host = objects.Host(nfvi_host, + initial_state=host_fsm.HOST_STATE.DISABLED) + + self._host_table[host.name] = host + + def create_host_group(self, name, members, policies): + """ + Create a host group + """ 
+ member_uuids = [] + + for instance_uuid, instance in self._instance_table.items(): + if instance.name in members: + member_uuids.append(instance_uuid) + + nfvi_host_group = nfvi.objects.v1.HostGroup( + name=name, + member_names=members, + policies=policies + ) + + host_group = objects.HostGroup(nfvi_host_group) + self._host_group_table[host_group.name] = host_group + + def create_host_aggregate(self, name, host_names): + """ + Create a host aggregate + """ + nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( + name=name, + host_names=host_names, + availability_zone='' + ) + + host_aggregate = objects.HostAggregate(nfvi_host_aggregate) + self._host_aggregate_table[host_aggregate.name] = host_aggregate diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_fw_update_strategy.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_fw_update_strategy.py new file mode 100755 index 00000000..19a8b1a6 --- /dev/null +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_fw_update_strategy.py @@ -0,0 +1,2076 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +import mock +import uuid + +from nfv_vim import nfvi + +from nfv_vim.strategy._strategy import FwUpdateStrategy + +from nfv_vim.objects import HOST_PERSONALITY +from nfv_vim.objects import SW_UPDATE_ALARM_RESTRICTION +from nfv_vim.objects import SW_UPDATE_APPLY_TYPE +from nfv_vim.objects import SW_UPDATE_INSTANCE_ACTION + +from . import sw_update_testcase # noqa: H304 + + +def create_fw_update_strategy( + controller_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, + storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, + worker_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, + max_parallel_worker_hosts=2, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START, + alarm_restrictions=SW_UPDATE_ALARM_RESTRICTION.STRICT, + single_controller=False): + """ + Create a firmware update strategy + """ + return FwUpdateStrategy( + uuid=str(uuid.uuid4()), + controller_apply_type=controller_apply_type, + storage_apply_type=storage_apply_type, + worker_apply_type=worker_apply_type, + max_parallel_worker_hosts=max_parallel_worker_hosts, + default_instance_action=default_instance_action, + alarm_restrictions=alarm_restrictions, + ignore_alarms=[], + single_controller=single_controller + ) + + +@mock.patch('nfv_vim.nfvi.nfvi_compute_plugin_disabled', sw_update_testcase.fake_nfvi_compute_plugin_disabled) +class TestFwUpdateStrategy(sw_update_testcase.SwUpdateStrategyTestCase): + """ + Firmware Update Strategy Unit Tests + """ + + def test_fw_update_strategy_worker_stages_ignore(self): + """ + Test the fw_update strategy add worker strategy stages: + - ignore worker apply + Verify: + - stages not created ; fw update is only supported for worker nodes + """ + + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('compute-0') + self.create_host('compute-1') + + # default apply type is 'ignore' for all node types. + # Only worker nodes support firmware upgrade. 
+ strategy = create_fw_update_strategy() + + worker_hosts = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + worker_hosts.append(host) + + success, reason = strategy._add_worker_strategy_stages( + sorted(worker_hosts, key=lambda host: host.name), + reboot=True) + + assert success is True, "Strategy creation failed" + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'completion_percentage': 100, + 'total_stages': 0 + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_storage_serial_no_instances(self): + """ + Test the fw_update strategy on a storage system: + - 2 controllers + - 2 storage hosts + - 4 worker hosts + options + - serial apply + - no instances + """ + + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 4, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts'}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts'}, + {'name': 'lock-hosts', + 'entity_names': ['compute-1']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-1']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts'}, + {'name': 'lock-hosts', + 'entity_names': ['compute-2']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-2']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts'}, + {'name': 'lock-hosts', + 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_storage_parallel_no_instances(self): + """ + Test the fw_update strategy on a storage system: + - 2 controllers + - 2 storage hosts + - 4 worker hosts + options + - parallel apply ; max 3 + - no instances + """ + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('compute-0') + 
self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + max_parallel_worker_hosts=3) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 2, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-1', 'compute-2']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-1', 'compute-2']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-1', 'compute-2']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-3']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_storage_serial_migrate(self): + """ + Test the fw_update strategy on a storage system: + - 2 controllers + - 2 storage hosts + - 4 worker hosts + options + - serial apply + - migrate 3 instances + """ + + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_1", 'compute-1') + self.create_instance('small', "test_instance_2", 'compute-3') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 4, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-2']}, + {'name': 'lock-hosts', 'entity_names': ['compute-2']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-2']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-0']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'lock-hosts', 'entity_names': ['compute-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 
'entity_names': ['compute-0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-1']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_1']}, + {'name': 'lock-hosts', 'entity_names': ['compute-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-3']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_2']}, + {'name': 'lock-hosts', 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_storage_parallel_migrate(self): + """ + Test the fw_update strategy on a storage system: + - 2 controllers + - 2 storage hosts + - 4 worker hosts + options + - parallel apply ; max 4 + - migrate 4 instances + """ + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('storage-0') + self.create_host('storage-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + + self.create_instance('small', "test_instance_1", 'compute-1') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + max_parallel_worker_hosts=4) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 2, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-5']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-5']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-5']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-1', 'compute-2', + 'compute-3', 'compute-4']}, + {'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_1', + 'test_instance_2', + 'test_instance_3', + 'test_instance_4']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-1', 'compute-2', + 'compute-3', 'compute-4']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-1', 'compute-2', + 'compute-3', 
'compute-4']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_standard_serial_stop_start(self): + """ + Test the fw_update strategy on a storage system: + - 2 controllers + - 2 storage hosts + - 4 worker hosts + options + - serial apply + - stop start 4 instances + """ + + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_1", 'compute-2') + self.create_instance('small', "test_instance_2", 'compute-4') + self.create_instance('small', "test_instance_3", 'compute-6') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 8, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-1']}, + {'name': 'lock-hosts', 'entity_names': ['compute-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-3']}, + {'name': 'lock-hosts', 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-3']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-5']}, + {'name': 'lock-hosts', 'entity_names': ['compute-5']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-5']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-7']}, + {'name': 'lock-hosts', 'entity_names': ['compute-7']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-7']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-0']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'lock-hosts', 'entity_names': ['compute-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-0']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_0']}, + {'name': 
'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-2']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_1']}, + {'name': 'lock-hosts', 'entity_names': ['compute-2']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-2']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-4']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_2']}, + {'name': 'lock-hosts', 'entity_names': ['compute-4']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-4']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_2']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', 'entity_names': ['compute-6']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_3']}, + {'name': 'lock-hosts', 'entity_names': ['compute-6']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', 'entity_names': ['compute-6']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_3']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_standard_parallel_stop_start(self): + """ + Test the fw_update strategy on a standard system: + - 2 controllers + - 8 worker hosts + options + - parallel apply ; max 10 + - stop start 8 instances + """ + + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_1", 'compute-1') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_5", 'compute-5') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START, + max_parallel_worker_hosts=10) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 1, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-1', + 
'compute-2', 'compute-3', + 'compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_0', 'test_instance_1', + 'test_instance_2', 'test_instance_3', + 'test_instance_4', 'test_instance_5', + 'test_instance_6', 'test_instance_7']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3', + 'compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3', + 'compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_0', 'test_instance_1', + 'test_instance_2', 'test_instance_3', + 'test_instance_4', 'test_instance_5', + 'test_instance_6', 'test_instance_7']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_standard_parallel_migrate_host_aggregate(self): + """ + Test the fw_update strategy on a storage system: + - 2 controllers + - 10 worker hosts + options + - parallel apply ; max 10 + - migrate instances ; 1 per host ; 1 locked + - hosts with no instances updated first + - host aggregate limits enforced + """ + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') + + self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-2', + 'compute-4', 'compute-6']) + + self.create_host_aggregate('aggregate-2', ['compute-1', 'compute-3', + 'compute-5', 'compute-7']) + + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_1", 'compute-1') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3', + admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_5", 'compute-5') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + max_parallel_worker_hosts=10) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 3, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-8', 'compute-9']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-8', 'compute-9']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-8', 'compute-9']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + 
{'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3']}, + {'name': 'disable-host-services', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_0', 'test_instance_1', + 'test_instance_2']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'disable-host-services', + 'entity_names': ['compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_4', 'test_instance_5', + 'test_instance_6', 'test_instance_7']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_standard_parallel_stop_start_host_aggregate(self): + """ + Test the fw_update strategy on a standard system: + - 2 controllers + - 10 worker hosts + options + - parallel apply ; max 4 + - stop start instances ; 1 per host ; 1 locked + - locked instances or hosts with no instances updated first + - 2x4 host aggregate groups + - hosts with locked instances or none at all are grouped + """ + self.create_host('controller-0') + self.create_host('controller-1') + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') + + self.create_host_aggregate('aggregate-1', ['compute-0', 'compute-2', + 'compute-4', 'compute-6']) + + self.create_host_aggregate('aggregate-2', ['compute-1', 'compute-3', + 'compute-5', 'compute-7']) + + self.create_instance('small', "test_instance_0", 'compute-0', admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) + self.create_instance('small', "test_instance_1", 'compute-1', admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.LOCKED) + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_5", 'compute-5') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + self.create_instance('small', "test_instance_8", 'compute-8') + self.create_instance('small', "test_instance_9", 'compute-9') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START, + 
max_parallel_worker_hosts=4) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 4, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-8', 'compute-9']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_8', 'test_instance_9']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-8', 'compute-9']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-8', 'compute-9']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_8', 'test_instance_9']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-2', 'compute-3']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_2', 'test_instance_3']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-2', 'compute-3']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-2', 'compute-3']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_2', 'test_instance_3']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-4', 'compute-5']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_4', 'test_instance_5']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-4', 'compute-5']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-4', 'compute-5']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_4', 'test_instance_5']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-6', 'compute-7']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_6', 'test_instance_7']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-6', 'compute-7']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-6', 'compute-7']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_6', 'test_instance_7']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_standard_parallel_migrate_overlap_host_aggregate(self): + """ + Test the fw_update strategy on a standard system: + - 2 controllers + - 10 worker hosts + options + - parallel apply ; max 2 + - migrate 10 instances ; 1 per worker host + - locked instances or hosts with no instances updated first + - 3 host aggregate groups with overlap ; 4, 3, 10 + - hosts with locked instances or none at all are grouped + """ + + self.create_host('controller-0') + self.create_host('controller-1') + 
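+        # The ten workers created below are split across two disjoint
+        # aggregates plus a third aggregate spanning all of them; the expected
+        # stages pair one host from each side per stage (max_parallel is 2),
+        # with the instance-free hosts (compute-1, compute-5) updated first.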
self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + self.create_host('compute-8') + self.create_host('compute-9') + + self.create_host_aggregate('aggregate-1', ['compute-0', + 'compute-1', + 'compute-2', + 'compute-3', + 'compute-4']) + self.create_host_aggregate('aggregate-2', ['compute-5', + 'compute-6', + 'compute-7', + 'compute-8', + 'compute-9']) + self.create_host_aggregate('aggregate-3', ['compute-0', + 'compute-1', + 'compute-2', + 'compute-3', + 'compute-4', + 'compute-5', + 'compute-6', + 'compute-7', + 'compute-8', + 'compute-9']) + + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + self.create_instance('small', "test_instance_8", 'compute-8') + self.create_instance('small', "test_instance_9", 'compute-9') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + max_parallel_worker_hosts=2) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 5, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-1', 'compute-5']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-1', 'compute-5']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-1', 'compute-5']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-6']}, + {'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_0', + 'test_instance_6']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-6']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-6']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-2', 'compute-7']}, + {'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_2', + 'test_instance_7']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-2', 'compute-7']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-2', 'compute-7']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-3', 'compute-8']}, + 
{'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_3', + 'test_instance_8']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-3', 'compute-8']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-3', 'compute-8']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-4', 'compute-9']}, + {'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_4', + 'test_instance_9']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-4', 'compute-9']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-4', 'compute-9']}, + {'name': 'system-stabilize', + 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_sx(self): + """ + Test the fw_update strategy on an All-In-One system: + - 1 all-in-one controller + options + - serial apply + - no instances + """ + + self.create_host('controller-0', aio=True) + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + single_controller=True) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 1, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_sx_stop_start(self): + """ + Test the fw_update strategy on an All-In-One system: + - 1 all-in-one controller + options + - serial apply + - stop start 1 instance + """ + self.create_host('controller-0', aio=True) + + self.create_instance('small', "test_instance_0", 'controller-0') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START, + single_controller=True) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 1, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + 
{'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_sx_migrate_reject(self): + """ + Test the fw_update strategy on an All-In-One system: + - 1 all-in-one controller + options + - serial apply + - migrate instances ; not possible in sx + """ + self.create_host('controller-0', aio=True) + + self.create_instance('small', "test_instance_0", 'controller-0') + self.create_instance('small', "test_instance_1", 'controller-0') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + single_controller=True) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + success, reason = strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + assert success is False, "Strategy creation failed" + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'completion_percentage': 100, + 'total_stages': 0 + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_sx_serial_migrate_no_openstack(self): + """ + Test the sw_patch strategy add worker strategy stages: + - 1 all-in-one controller host + - no openstack + - serial apply + - migrate instance action with no instance + """ + self.create_host('controller-0', aio=True, openstack_installed=False) + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + single_controller=True) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 1, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', + 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', + 'timeout': 60}, + ] + }, + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_dx_no_instances(self): + """ + Test the fw_update strategy on an All-In-One system: + - 2 all-in-one controllers + options + - serial apply + - no instances + """ + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: 
host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 2, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_dx_migrate_instance(self): + """ + Test the fw_update strategy on an All-In-One system: + - 2 all-in-one controllers + options + - serial apply + - migrate 1 instance + """ + + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) + + self.create_instance('small', "test_instance_0", 'controller-0') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + single_controller=False) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 2, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_dx_migrate_instances(self): + """ + Test the fw_update strategy on an All-In-One system: + - 2 all-in-one controllers + options + - serial apply + - migrate 2 instances which switches the controller update order + """ + + self.create_host('controller-0', aio=True) + 
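+        # One instance is placed on each AIO controller below, so both stages
+        # in the expected results include a swact followed by a
+        # migrate-instances step before the controller is locked.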
self.create_host('controller-1', aio=True) + + self.create_instance('small', "test_instance_0", 'controller-0') + self.create_instance('small', "test_instance_1", 'controller-1') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + single_controller=False) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 2, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_dx_stop_start_instance(self): + """ + Test the fw_update strategy on an All-In-One system: + - 2 all-in-one controllers + options + - serial + - stop start 1 instance + """ + + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) + + self.create_instance('small', "test_instance_0", 'controller-0') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START, + single_controller=False) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + # assert success is False, "Strategy creation failed" + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 2, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 9, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 
'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_dx_stop_start_instances(self): + """ + Test the fw_update strategy on an All-In-One system: + - 2 all-in-one controllers + options + - serial + - stop start instances + """ + + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) + + self.create_instance('small', "test_instance_0", 'controller-0') + self.create_instance('small', "test_instance_1", 'controller-1') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.SERIAL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START, + single_controller=False) + + fw_update_host_list = [] + for host in self._host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 2, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 9, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 9, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_dx_plus_parallel_locked_hosts(self): + """ + Test the fw_update strategy on an All-In-One system: + - 2 all-in-one controllers + - 6 computes ; 2 are locked + options + - parallel apply ; max 2 + - no instances + """ + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) + + self.create_host('compute-0') + self.create_host('compute-1', + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED, + oper_state=nfvi.objects.v1.HOST_OPER_STATE.DISABLED, + avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.ONLINE) + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + 
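+        # compute-1 (above) and compute-5 (below) are created locked/disabled;
+        # only unlocked worker hosts are collected into the strategy further
+        # down, so the expected stages cover the two controllers plus
+        # compute-0/2/3/4 in pairs of two.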
self.create_host('compute-5', + admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.LOCKED, + oper_state=nfvi.objects.v1.HOST_OPER_STATE.DISABLED, + avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.ONLINE) + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + max_parallel_worker_hosts=2) + + fw_update_worker_host_list = [] + for host in self._host_table.values(): + if host._nfvi_host.admin_state == nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED: + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_worker_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_worker_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 4, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-2']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-2']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-2']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 6, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-3', 'compute-4']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-3', 'compute-4']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-3', 'compute-4']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_plus_parallel_migrate_anti_affinity(self): + """ + Test the fw_update strategy on an All-In-One Plus system: + - 2 all-in-one controllers + - 6 worker hosts + options + - parallel apply ; max 2 + - migrate instances with 2 anti affinity groups + """ + + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_1", 'compute-1') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', 
"test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_5", 'compute-5') + + self.create_instance_group('instance_group_1', + ['test_instance_0', 'test_instance_1'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + + self.create_instance_group('instance_group_2', + ['test_instance_3', 'test_instance_5'], + [nfvi.objects.v1.INSTANCE_GROUP_POLICY.ANTI_AFFINITY]) + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.MIGRATE, + max_parallel_worker_hosts=2) + + fw_update_worker_host_list = [] + for host in self._host_table.values(): + if host._nfvi_host.admin_state == nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED: + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_worker_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_worker_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 5, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-2']}, + {'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_0', 'test_instance_2']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-2']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-2']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-1', 'compute-3']}, + {'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_1', 'test_instance_3']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-1', 'compute-3']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-1', 'compute-3']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-4', 'compute-5']}, + {'name': 'disable-host-services'}, + {'name': 'migrate-instances', + 'entity_names': ['test_instance_4', 'test_instance_5']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-4', 'compute-5']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': 
['compute-4', 'compute-5']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) + + def test_fw_update_strategy_aio_plus_parallel_stop_start(self): + """ + Test the fw_update strategy on an All-In-One Plus system: + - 2 all-in-one controllers + - 8 worker hosts + options + - parallel apply ; max 10 + - stop start 8 instances + """ + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) + self.create_host('compute-0') + self.create_host('compute-1') + self.create_host('compute-2') + self.create_host('compute-3') + self.create_host('compute-4') + self.create_host('compute-5') + self.create_host('compute-6') + self.create_host('compute-7') + + self.create_instance('small', "test_instance_0", 'compute-0') + self.create_instance('small', "test_instance_1", 'compute-1') + self.create_instance('small', "test_instance_2", 'compute-2') + self.create_instance('small', "test_instance_3", 'compute-3') + self.create_instance('small', "test_instance_4", 'compute-4') + self.create_instance('small', "test_instance_5", 'compute-5') + self.create_instance('small', "test_instance_6", 'compute-6') + self.create_instance('small', "test_instance_7", 'compute-7') + + strategy = create_fw_update_strategy( + worker_apply_type=SW_UPDATE_APPLY_TYPE.PARALLEL, + default_instance_action=SW_UPDATE_INSTANCE_ACTION.STOP_START, + max_parallel_worker_hosts=10) + + fw_update_controller_host_list = [] + fw_update_worker_host_list = [] + for host in self._host_table.values(): + if host._nfvi_host.admin_state == nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED: + if HOST_PERSONALITY.WORKER in host.personality: + fw_update_worker_host_list.append(host) + + strategy._add_worker_strategy_stages( + sorted(fw_update_worker_host_list, key=lambda host: host.name), + reboot=True) + + apply_phase = strategy.apply_phase.as_dict() + + expected_results = { + 'total_stages': 3, + 'stages': [ + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-0']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-0']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-0']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 7, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['controller-1']}, + {'name': 'swact-hosts', + 'entity_names': ['controller-1']}, + {'name': 'lock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['controller-1']}, + {'name': 'system-stabilize', 'timeout': 60} + ] + }, + {'name': 'fw-update-worker-hosts', + 'total_steps': 8, + 'steps': [ + {'name': 'query-alarms'}, + {'name': 'fw-update-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3', + 'compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'stop-instances', + 'entity_names': ['test_instance_0', 'test_instance_1', + 'test_instance_2', 'test_instance_3', + 'test_instance_4', 'test_instance_5', + 'test_instance_6', 'test_instance_7']}, + {'name': 'lock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3', + 'compute-4', 'compute-5', + 
'compute-6', 'compute-7']}, + {'name': 'system-stabilize', 'timeout': 15}, + {'name': 'unlock-hosts', + 'entity_names': ['compute-0', 'compute-1', + 'compute-2', 'compute-3', + 'compute-4', 'compute-5', + 'compute-6', 'compute-7']}, + {'name': 'start-instances', + 'entity_names': ['test_instance_0', 'test_instance_1', + 'test_instance_2', 'test_instance_3', + 'test_instance_4', 'test_instance_5', + 'test_instance_6', 'test_instance_7']}, + {'name': 'system-stabilize', 'timeout': 60}, + ] + } + ] + } + + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_nfv_client.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_nfv_client.py index 5af2065b..47d8b83b 100755 --- a/nfv/nfv-tests/nfv_unit_tests/tests/test_nfv_client.py +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_nfv_client.py @@ -54,6 +54,11 @@ class TestNFVClientShell(testcase.NFVTestCase): shell_args = ['patch-strategy', ] self._test_shell_bad_or_empty_args(shell_args=shell_args) + # fw-update-strategy expects additional arguments + def test_shell_fw_update_strategy_incomplete_args(self): + shell_args = ['fw-update-strategy', ] + self._test_shell_bad_or_empty_args(shell_args=shell_args) + # --- Help Cases ---- # -h will print_help and SystemExit @mock.patch('argparse.ArgumentParser.print_help') @@ -69,6 +74,10 @@ class TestNFVClientShell(testcase.NFVTestCase): shell_args = ['patch-strategy', '-h', ] self._test_shell_help(shell_args=shell_args) + def test_shell_fw_update_strategy_help(self): + shell_args = ['fw-update-strategy', '-h', ] + self._test_shell_help(shell_args=shell_args) + # -- Show commands -- # Both patch-strategy and upgrade-strategy use the same underlying # sw_update class, but with different modes @@ -87,6 +96,10 @@ class TestNFVClientShell(testcase.NFVTestCase): shell_args = ['patch-strategy', 'show', ] self._test_shell_show_missing_env(shell_args=shell_args) + def test_shell_fw_update_strategy_show_missing_env(self): + shell_args = ['fw-update-strategy', 'show', ] + self._test_shell_show_missing_env(shell_args=shell_args) + # Test the show commands are invoked when env values detected @mock.patch.dict(os.environ, {'OS_AUTH_URL': 'FAKE_OS_AUTH_URL', @@ -110,3 +123,7 @@ class TestNFVClientShell(testcase.NFVTestCase): def test_shell_patch_strategy_show(self): shell_args = ['patch-strategy', 'show', ] self._test_shell_show(shell_args=shell_args) + + def test_shell_fw_update_strategy_show(self): + shell_args = ['fw-update-strategy', 'show', ] + self._test_shell_show(shell_args=shell_args) diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py index e4659f79..a540fc25 100755 --- a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_patch_strategy.py @@ -1,37 +1,22 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. 
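The expected_results dictionaries used by the firmware update test cases above follow the same shape the patch and upgrade strategy tests use: a 'total_stages' count plus a 'stages' list whose entries name each stage and spell out only the step fields under test. A condensed, standalone sketch of the subset comparison that validate_phase (factored out of this file below into the new sw_update_testcase.py) performs, with a tiny illustrative phase dict standing in for a real strategy.apply_phase.as_dict():

def check_phase(phase, expected):
    # Every key present in 'expected' must match the generated phase;
    # 'stages' and 'steps' are walked positionally, so the expected dict only
    # needs to list the fields the test actually cares about.
    for key, want in expected.items():
        if key != 'stages':
            assert phase[key] == want, "%s: %s != %s" % (key, phase[key], want)
            continue
        for stage_idx, stage in enumerate(want):
            got_stage = phase['stages'][stage_idx]
            for stage_key, stage_want in stage.items():
                if stage_key != 'steps':
                    assert got_stage[stage_key] == stage_want
                    continue
                for step_idx, step in enumerate(stage_want):
                    got_step = got_stage['steps'][step_idx]
                    for step_key, step_want in step.items():
                        assert got_step[step_key] == step_want

# Illustrative phase only; the real tests compare strategy.apply_phase.as_dict().
phase = {'total_stages': 1,
         'stages': [{'name': 'fw-update-worker-hosts',
                     'total_steps': 2,
                     'steps': [{'name': 'query-alarms'},
                               {'name': 'fw-update-hosts',
                                'entity_names': ['compute-0']}]}]}

check_phase(phase, {'total_stages': 1,
                    'stages': [{'name': 'fw-update-worker-hosts',
                                'steps': [{'name': 'query-alarms'},
                                          {'name': 'fw-update-hosts'}]}]})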
# # SPDX-License-Identifier: Apache-2.0 # -import fixtures import mock -import pprint import uuid from nfv_common import strategy as common_strategy -from nfv_vim import host_fsm from nfv_vim import nfvi -from nfv_vim import objects from nfv_vim.objects import HOST_PERSONALITY from nfv_vim.objects import SW_UPDATE_ALARM_RESTRICTION from nfv_vim.objects import SW_UPDATE_APPLY_TYPE from nfv_vim.objects import SW_UPDATE_INSTANCE_ACTION from nfv_vim.objects import SwPatch -from nfv_vim.strategy._strategy import strategy_rebuild_from_dict from nfv_vim.strategy._strategy import SwPatchStrategy -from nfv_vim.tables._host_aggregate_table import HostAggregateTable -from nfv_vim.tables._host_group_table import HostGroupTable -from nfv_vim.tables._host_table import HostTable -from nfv_vim.tables._instance_group_table import InstanceGroupTable -from nfv_vim.tables._instance_table import InstanceTable -from nfv_vim.tables._table import Table -from . import testcase # noqa: H304 -from . import utils # noqa: H304 - - -DEBUG_PRINTING = False +from . import sw_update_testcase # noqa: H304 def create_sw_patch_strategy( @@ -60,297 +45,15 @@ def create_sw_patch_strategy( ) -def validate_strategy_persists(strategy): +@mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', sw_update_testcase.fake_save) +@mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', sw_update_testcase.fake_timer) +@mock.patch('nfv_vim.strategy._strategy.get_local_host_name', sw_update_testcase.fake_host_name) +@mock.patch('nfv_vim.event_log._instance._event_issue', sw_update_testcase.fake_event_issue) +@mock.patch('nfv_vim.nfvi.nfvi_compute_plugin_disabled', sw_update_testcase.fake_nfvi_compute_plugin_disabled) +class TestSwPatchStrategy(sw_update_testcase.SwUpdateStrategyTestCase): """ - Validate that the strategy can be converted to a dict and back without any - loss of data. - Note: This is not foolproof - it won't catch cases where the an object - attribute was missed from both the as_dict and from_dict methods. + Software Patch Strategy Unit Tests """ - strategy_dict = strategy.as_dict() - new_strategy = strategy_rebuild_from_dict(strategy_dict) - - if DEBUG_PRINTING: - if strategy.as_dict() != new_strategy.as_dict(): - print("==================== Strategy ====================") - pprint.pprint(strategy.as_dict()) - print("============== Converted Strategy ================") - pprint.pprint(new_strategy.as_dict()) - assert strategy.as_dict() == new_strategy.as_dict(), \ - "Strategy changed when converting to/from dict" - - -def validate_phase(phase, expected_results): - """ - Validate that the phase matches everything contained in the expected_results - Note: there is probably a super generic, pythonic way to do this, but this - is good enough (tm). 
- """ - if DEBUG_PRINTING: - print("====================== Phase Results ========================") - pprint.pprint(phase) - print("===================== Expected Results ======================") - pprint.pprint(expected_results) - - for key in expected_results: - if key == 'stages': - stage_number = 0 - for stage in expected_results[key]: - apply_stage = phase[key][stage_number] - for stages_key in stage: - if stages_key == 'steps': - step_number = 0 - for step in stage[stages_key]: - apply_step = apply_stage[stages_key][step_number] - for step_key in step: - assert apply_step[step_key] == step[step_key], \ - "for [%s][%d][%s][%d][%s] found: %s but expected: %s" % \ - (key, stage_number, stages_key, - step_number, step_key, - apply_step[step_key], step[step_key]) - step_number += 1 - else: - assert apply_stage[stages_key] == stage[stages_key], \ - "for [%s][%d][%s] found: %s but expected: %s" % \ - (key, stage_number, stages_key, - apply_stage[stages_key], stage[stages_key]) - stage_number += 1 - else: - assert phase[key] == expected_results[key], \ - "for [%s] found: %s but expected: %s" % \ - (key, phase[key], expected_results[key]) - - -def fake_save(a): - pass - - -def fake_timer(a, b, c, d): - return 1234 - - -def fake_host_name(): - return 'controller-0' - - -def fake_event_issue(a, b, c, d): - """ - Mock out the _event_issue function because it is being called when instance - objects are created. It ends up trying to communicate with another thread - (that doesn't exist) and this eventually leads to nosetests hanging if - enough events are issued. - """ - return None - - -def fake_nfvi_compute_plugin_disabled(): - return False - - -@mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', fake_save) -@mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', fake_timer) -@mock.patch('nfv_vim.strategy._strategy.get_local_host_name', fake_host_name) -@mock.patch('nfv_vim.event_log._instance._event_issue', fake_event_issue) -@mock.patch('nfv_vim.nfvi.nfvi_compute_plugin_disabled', fake_nfvi_compute_plugin_disabled) -class TestSwPatchStrategy(testcase.NFVTestCase): - - def setUp(self): - """ - Setup for testing. 
- """ - super(TestSwPatchStrategy, self).setUp() - self._tenant_table = Table() - self._instance_type_table = Table() - self._instance_table = InstanceTable() - self._instance_group_table = InstanceGroupTable() - self._host_table = HostTable() - self._host_group_table = HostGroupTable() - self._host_aggregate_table = HostAggregateTable() - - # Don't attempt to write to the database while unit testing - self._tenant_table.persist = False - self._instance_type_table.persist = False - self._instance_table.persist = False - self._instance_group_table.persist = False - self._host_table.persist = False - self._host_group_table.persist = False - self._host_aggregate_table.persist = False - - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._tenant_table._tenant_table', - self._tenant_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_table._host_table', - self._host_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_group_table._instance_group_table', - self._instance_group_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_group_table._host_group_table', - self._host_group_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', - self._host_aggregate_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_table._instance_table', - self._instance_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_type_table._instance_type_table', - self._instance_type_table)) - - instance_type_uuid = str(uuid.uuid4()) - instance_type = objects.InstanceType(instance_type_uuid, 'small') - instance_type.update_details(vcpus=1, - mem_mb=64, - disk_gb=1, - ephemeral_gb=0, - swap_gb=0, - guest_services=None, - auto_recovery=True, - live_migration_timeout=800, - live_migration_max_downtime=500) - self._instance_type_table[instance_type_uuid] = instance_type - - def tearDown(self): - """ - Cleanup testing setup. 
- """ - super(TestSwPatchStrategy, self).tearDown() - self._tenant_table.clear() - self._instance_type_table.clear() - self._instance_table.clear() - self._instance_group_table.clear() - self._host_table.clear() - self._host_group_table.clear() - self._host_aggregate_table.clear() - - def create_instance(self, instance_type_name, instance_name, host_name, - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED): - """ - Create an instance - """ - tenant_uuid = str(uuid.uuid4()) - image_uuid = str(uuid.uuid4()) - - tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) - self._tenant_table[tenant_uuid] = tenant - - for instance_type in self._instance_type_table.values(): - if instance_type.name == instance_type_name: - instance_uuid = str(uuid.uuid4()) - - nfvi_instance = nfvi.objects.v1.Instance( - instance_uuid, instance_name, tenant_uuid, - admin_state=admin_state, - oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, - avail_status=list(), - action=nfvi.objects.v1.INSTANCE_ACTION.NONE, - host_name=host_name, - instance_type=utils.instance_type_to_flavor_dict( - instance_type), - image_uuid=image_uuid) - - instance = objects.Instance(nfvi_instance) - self._instance_table[instance.uuid] = instance - return - - assert 0, "Unknown instance_type_name: %s" % instance_type_name - - def create_instance_group(self, name, members, policies): - """ - Create an instance group - """ - member_uuids = [] - - for instance_uuid, instance in self._instance_table.items(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_instance_group = nfvi.objects.v1.InstanceGroup( - uuid=str(uuid.uuid4()), - name=name, - member_uuids=member_uuids, - policies=policies - ) - - instance_group = objects.InstanceGroup(nfvi_instance_group) - self._instance_group_table[instance_group.uuid] = instance_group - - def create_host(self, - host_name, - cpe=False, - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED, - openstack_installed=True): - """ - Create a host - """ - personality = '' - - openstack_control = False - openstack_compute = False - - if host_name.startswith('controller'): - personality = HOST_PERSONALITY.CONTROLLER - if cpe: - personality = personality + ',' + HOST_PERSONALITY.WORKER - if openstack_installed: - openstack_control = True - if cpe: - openstack_compute = True - elif host_name.startswith('compute'): - personality = HOST_PERSONALITY.WORKER - if openstack_installed: - openstack_compute = True - elif host_name.startswith('storage'): - personality = HOST_PERSONALITY.STORAGE - else: - assert 0, "Invalid host_name: %s" % host_name - - nfvi_host = nfvi.objects.v1.Host( - uuid=str(uuid.uuid4()), - name=host_name, - personality=personality, - admin_state=admin_state, - oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, - avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, - action=nfvi.objects.v1.HOST_ACTION.NONE, - software_load='12.01', - target_load='12.01', - openstack_compute=openstack_compute, - openstack_control=openstack_control, - remote_storage=False, - uptime='1000' - ) - - host = objects.Host(nfvi_host, - initial_state=host_fsm.HOST_STATE.ENABLED) - self._host_table[host.name] = host - - def create_host_group(self, name, members, policies): - """ - Create a host group - """ - member_uuids = [] - - for instance_uuid, instance in self._instance_table.items(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_host_group = nfvi.objects.v1.HostGroup( - name=name, - member_names=members, - policies=policies - ) - - 
host_group = objects.HostGroup(nfvi_host_group) - self._host_group_table[host_group.name] = host_group - - def create_host_aggregate(self, name, host_names): - """ - Create a host aggregate - """ - nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( - name=name, - host_names=host_names, - availability_zone='' - ) - - host_aggregate = objects.HostAggregate(nfvi_host_aggregate) - self._host_aggregate_table[host_aggregate.name] = host_aggregate - def test_sw_patch_strategy_worker_stages_ignore(self): """ Test the sw_patch strategy add worker strategy stages: @@ -399,8 +102,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): 'total_stages': 0 } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_migrate_anti_affinity(self): """ @@ -505,8 +208,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_migrate_ten_hosts(self): """ @@ -657,8 +360,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_migrate_host_aggregate(self): """ @@ -820,8 +523,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_migrate_overlap_host_aggregate(self): """ @@ -993,8 +696,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_migrate_small_host_aggregate(self): """ @@ -1162,8 +865,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity(self): """ @@ -1269,8 +972,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity_locked_instance(self): """ @@ -1420,8 +1123,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches. 
strategy = create_sw_patch_strategy( @@ -1462,8 +1165,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_stop_start_locked_host(self): """ @@ -1546,8 +1249,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate_locked_instance(self): """ @@ -1651,8 +1354,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_stop_start_host_aggregate_single_host(self): """ @@ -1718,8 +1421,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_stop_start_anti_affinity_host_aggregate(self): """ @@ -1817,8 +1520,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_serial_stop_start(self): """ @@ -1938,8 +1641,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches strategy = create_sw_patch_strategy( @@ -1998,8 +1701,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_serial_stop_start_locked_host(self): """ @@ -2130,8 +1833,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches strategy = create_sw_patch_strategy( @@ -2190,8 +1893,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_parallel_stop_start_max_hosts(self): """ @@ -2306,8 +2009,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + 
sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_serial_migrate(self): """ @@ -2426,8 +2129,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches strategy = create_sw_patch_strategy( @@ -2486,8 +2189,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_worker_stages_serial_migrate_locked_instance(self): """ @@ -2595,8 +2298,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_storage_stages_ignore(self): """ @@ -2641,8 +2344,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): 'total_stages': 0 } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_storage_stages_parallel_host_group(self): """ @@ -2729,8 +2432,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches strategy = create_sw_patch_strategy( @@ -2768,8 +2471,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_storage_stages_serial(self): """ @@ -2898,8 +2601,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_controller_stages_ignore(self): """ @@ -2932,8 +2635,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): 'total_stages': 0 } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_controller_stages_serial(self): """ @@ -3003,8 +2706,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches strategy = create_sw_patch_strategy( @@ -3042,19 +2745,19 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, 
expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_stages_parallel_stop_start(self): + def test_sw_patch_strategy_aio_stages_parallel_stop_start(self): """ Test the sw_patch strategy add worker strategy stages: - - cpe hosts + - aio hosts - parallel apply treated as serial - stop start instance action - test both reboot and no reboot cases """ - self.create_host('controller-0', cpe=True) - self.create_host('controller-1', cpe=True) + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) self.create_instance('small', "test_instance_0", @@ -3131,8 +2834,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches strategy = create_sw_patch_strategy( @@ -3169,18 +2872,18 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_stages_serial_stop_start(self): + def test_sw_patch_strategy_aio_stages_serial_stop_start(self): """ Test the sw_patch strategy add worker strategy stages: - - cpe hosts + - aio hosts - serial apply - stop start instance action """ - self.create_host('controller-0', cpe=True) - self.create_host('controller-1', cpe=True) + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) self.create_instance('small', "test_instance_0", @@ -3256,19 +2959,19 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_stages_serial_stop_start_no_instances(self): + def test_sw_patch_strategy_aio_stages_serial_stop_start_no_instances(self): """ Test the sw_patch strategy add worker strategy stages: - - cpe hosts + - aio hosts - no instances - serial apply - stop start instance action """ - self.create_host('controller-0', cpe=True) - self.create_host('controller-1', cpe=True) + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) worker_hosts = [] for host in self._host_table.values(): @@ -3329,19 +3032,19 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_plus_stages_parallel_stop_start(self): + def test_sw_patch_strategy_aio_plus_stages_parallel_stop_start(self): """ Test the sw_patch strategy add worker strategy stages: - - cpe hosts plus workers + - aio hosts plus workers - parallel apply treated as serial - stop start instance action - test both reboot and no reboot cases """ - self.create_host('controller-0', cpe=True) - self.create_host('controller-1', cpe=True) + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) self.create_instance('small', "test_instance_0", @@ 
-3448,8 +3151,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) # Test no reboot patches strategy = create_sw_patch_strategy( @@ -3495,18 +3198,18 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_plus_stages_serial_stop_start(self): + def test_sw_patch_strategy_aio_plus_stages_serial_stop_start(self): """ Test the sw_patch strategy add worker strategy stages: - - cpe hosts plus workers + - aio hosts plus workers - serial apply - stop start instance action """ - self.create_host('controller-0', cpe=True) - self.create_host('controller-1', cpe=True) + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) self.create_instance('small', "test_instance_0", @@ -3632,20 +3335,20 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_plus_stages_serial_stop_start_no_instances( + def test_sw_patch_strategy_aio_plus_stages_serial_stop_start_no_instances( self): """ Test the sw_patch strategy add worker strategy stages: - - cpe hosts plus workers + - aio hosts plus workers - no instances - serial apply - stop start instance action """ - self.create_host('controller-0', cpe=True) - self.create_host('controller-1', cpe=True) + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) self.create_host('compute-0') self.create_host('compute-1') @@ -3741,19 +3444,19 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_simplex_stages_serial_migrate(self): + def test_sw_patch_strategy_aio_simplex_stages_serial_migrate(self): """ Test the sw_patch strategy add worker strategy stages: - - simplex cpe host + - simplex aio host - serial apply - migrate instance action Verify: - stage creation fails """ - self.create_host('controller-0', cpe=True) + self.create_host('controller-0', aio=True) self.create_instance('small', "test_instance_0", @@ -3779,15 +3482,15 @@ class TestSwPatchStrategy(testcase.NFVTestCase): assert success is False, "Strategy creation did not fail" - def test_sw_patch_strategy_cpe_simplex_stages_serial_migrate_no_openstack( + def test_sw_patch_strategy_aio_simplex_stages_serial_migrate_no_openstack( self): """ Test the sw_patch strategy add worker strategy stages: - - simplex cpe host (no openstack) + - simplex aio host (no openstack) - serial apply - migrate instance action """ - self.create_host('controller-0', cpe=True, openstack_installed=False) + self.create_host('controller-0', aio=True, openstack_installed=False) worker_hosts = [] for host in self._host_table.values(): @@ -3827,17 +3530,17 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - 
validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_simplex_stages_serial_stop_start(self): + def test_sw_patch_strategy_aio_simplex_stages_serial_stop_start(self): """ Test the sw_patch strategy add worker strategy stages: - - simplex cpe host + - simplex aio host - serial apply - stop start instance action """ - self.create_host('controller-0', cpe=True) + self.create_host('controller-0', aio=True) self.create_instance('small', "test_instance_0", @@ -3885,18 +3588,18 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) - def test_sw_patch_strategy_cpe_simplex_stages_serial_stop_start_no_instances(self): + def test_sw_patch_strategy_aio_simplex_stages_serial_stop_start_no_instances(self): """ Test the sw_patch strategy add worker strategy stages: - - simplex cpe host + - simplex aio host - no instances - serial apply - stop start instance action """ - self.create_host('controller-0', cpe=True) + self.create_host('controller-0', aio=True) worker_hosts = [] for host in self._host_table.values(): @@ -3936,8 +3639,8 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) def test_sw_patch_strategy_build_complete_parallel_stop_start(self): """ @@ -4023,5 +3726,5 @@ class TestSwPatchStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) diff --git a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py index 5d314ad9..0256da5e 100755 --- a/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py +++ b/nfv/nfv-tests/nfv_unit_tests/tests/test_sw_upgrade_strategy.py @@ -1,325 +1,32 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # -import fixtures import mock -import pprint import testtools import uuid from nfv_common import strategy as common_strategy -from nfv_vim import host_fsm from nfv_vim import nfvi -from nfv_vim import objects from nfv_vim.objects import HOST_NAME from nfv_vim.objects import HOST_PERSONALITY from nfv_vim.objects import SW_UPDATE_ALARM_RESTRICTION from nfv_vim.objects import SW_UPDATE_APPLY_TYPE from nfv_vim.objects import SwUpgrade -from nfv_vim.strategy._strategy import strategy_rebuild_from_dict from nfv_vim.strategy._strategy import SwUpgradeStrategy -from nfv_vim.tables._host_aggregate_table import HostAggregateTable -from nfv_vim.tables._host_group_table import HostGroupTable -from nfv_vim.tables._host_table import HostTable -from nfv_vim.tables._instance_group_table import InstanceGroupTable -from nfv_vim.tables._instance_table import InstanceTable -from nfv_vim.tables._table import Table from nfv_vim.nfvi.objects.v1 import UPGRADE_STATE -from . import testcase # noqa: H304 -from . 
import utils # noqa: H304 +from . import sw_update_testcase # noqa: H304 -DEBUG_PRINTING = False - - -def validate_strategy_persists(strategy): - """ - Validate that the strategy can be converted to a dict and back without any - loss of data. - Note: This is not foolproof - it won't catch cases where the an object - attribute was missed from both the as_dict and from_dict methods. - """ - strategy_dict = strategy.as_dict() - new_strategy = strategy_rebuild_from_dict(strategy_dict) - - if DEBUG_PRINTING: - if strategy.as_dict() != new_strategy.as_dict(): - print("==================== Strategy ====================") - pprint.pprint(strategy.as_dict()) - print("============== Converted Strategy ================") - pprint.pprint(new_strategy.as_dict()) - assert strategy.as_dict() == new_strategy.as_dict(), \ - "Strategy changed when converting to/from dict" - - -def validate_phase(phase, expected_results): - """ - Validate that the phase matches everything contained in the expected_results - Note: there is probably a super generic, pythonic way to do this, but this - is good enough (tm). - """ - if DEBUG_PRINTING: - print("====================== Phase Results ========================") - pprint.pprint(phase) - print("===================== Expected Results ======================") - pprint.pprint(expected_results) - - for key in expected_results: - if key == 'stages': - stage_number = 0 - for stage in expected_results[key]: - apply_stage = phase[key][stage_number] - for stages_key in stage: - if stages_key == 'steps': - step_number = 0 - for step in stage[stages_key]: - apply_step = apply_stage[stages_key][step_number] - for step_key in step: - assert apply_step[step_key] == step[step_key], \ - "for [%s][%d][%s][%d][%s] found: %s but expected: %s" % \ - (key, stage_number, stages_key, - step_number, step_key, - apply_step[step_key], step[step_key]) - step_number += 1 - else: - assert apply_stage[stages_key] == stage[stages_key], \ - "for [%s][%d][%s] found: %s but expected: %s" % \ - (key, stage_number, stages_key, - apply_stage[stages_key], stage[stages_key]) - stage_number += 1 - else: - assert phase[key] == expected_results[key], \ - "for [%s] found: %s but expected: %s" % \ - (key, phase[key], expected_results[key]) - - -def fake_save(a): - pass - - -def fake_timer(a, b, c, d): - return 1234 - - -def fake_host_name_controller_1(): - return 'controller-1' - - -def fake_host_name_controller_0(): - return 'controller-0' - - -def fake_event_issue(a, b, c, d): - """ - Mock out the _event_issue function because it is being called when instance - objects are created. It ends up trying to communicate with another thread - (that doesn't exist) and this eventually leads to nosetests hanging if - enough events are issued. 
- """ - return None - - -def fake_nfvi_compute_plugin_disabled(): - return False - - -@mock.patch('nfv_vim.event_log._instance._event_issue', fake_event_issue) -@mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', fake_save) -@mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', fake_timer) -@mock.patch('nfv_vim.nfvi.nfvi_compute_plugin_disabled', fake_nfvi_compute_plugin_disabled) -class TestSwUpgradeStrategy(testcase.NFVTestCase): - - def setUp(self): - super(TestSwUpgradeStrategy, self).setUp() - self._tenant_table = Table() - self._instance_type_table = Table() - self._instance_table = InstanceTable() - self._instance_group_table = InstanceGroupTable() - self._host_table = HostTable() - self._host_group_table = HostGroupTable() - self._host_aggregate_table = HostAggregateTable() - - # Don't attempt to write to the database while unit testing - self._tenant_table.persist = False - self._instance_type_table.persist = False - self._instance_table.persist = False - self._instance_group_table.persist = False - self._host_table.persist = False - self._host_group_table.persist = False - self._host_aggregate_table.persist = False - - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_aggregate_table._host_aggregate_table', - self._host_aggregate_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_group_table._host_group_table', - self._host_group_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._host_table._host_table', - self._host_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_group_table._instance_group_table', - self._instance_group_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_table._instance_table', - self._instance_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._instance_type_table._instance_type_table', - self._instance_type_table)) - self.useFixture(fixtures.MonkeyPatch('nfv_vim.tables._tenant_table._tenant_table', - self._tenant_table)) - - instance_type_uuid = str(uuid.uuid4()) - - instance_type = objects.InstanceType(instance_type_uuid, 'small') - instance_type.update_details(vcpus=1, - mem_mb=64, - disk_gb=1, - ephemeral_gb=0, - swap_gb=0, - guest_services=None, - auto_recovery=True, - live_migration_timeout=800, - live_migration_max_downtime=500) - self._instance_type_table[instance_type_uuid] = instance_type - - def tearDown(self): - """ - Cleanup testing setup. 
- """ - super(TestSwUpgradeStrategy, self).tearDown() - self._tenant_table.clear() - self._instance_type_table.clear() - self._instance_table.clear() - self._instance_group_table.clear() - self._host_table.clear() - self._host_group_table.clear() - self._host_aggregate_table.clear() - - def create_instance(self, instance_type_name, instance_name, host_name, - admin_state=nfvi.objects.v1.INSTANCE_ADMIN_STATE.UNLOCKED): - """ - Create an instance - """ - tenant_uuid = str(uuid.uuid4()) - image_uuid = str(uuid.uuid4()) - - tenant = objects.Tenant(tenant_uuid, "%s_name" % tenant_uuid, '', True) - self._tenant_table[tenant_uuid] = tenant - - for instance_type in self._instance_type_table.values(): - if instance_type.name == instance_type_name: - instance_uuid = str(uuid.uuid4()) - - nfvi_instance = nfvi.objects.v1.Instance( - instance_uuid, instance_name, tenant_uuid, - admin_state=admin_state, - oper_state=nfvi.objects.v1.INSTANCE_OPER_STATE.ENABLED, - avail_status=list(), - action=nfvi.objects.v1.INSTANCE_ACTION.NONE, - host_name=host_name, - instance_type=utils.instance_type_to_flavor_dict(instance_type), - image_uuid=image_uuid) - - instance = objects.Instance(nfvi_instance) - self._instance_table[instance.uuid] = instance - return - - assert 0, "Unknown instance_type_name: %s" % instance_type_name - - def create_instance_group(self, name, members, policies): - """ - Create an instance group - """ - member_uuids = [] - - for instance_uuid, instance in self._instance_table.items(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_instance_group = nfvi.objects.v1.InstanceGroup( - uuid=str(uuid.uuid4()), - name=name, - member_uuids=member_uuids, - policies=policies - ) - - instance_group = objects.InstanceGroup(nfvi_instance_group) - self._instance_group_table[instance_group.uuid] = instance_group - - def create_host(self, - host_name, - cpe=False, - admin_state=nfvi.objects.v1.HOST_ADMIN_STATE.UNLOCKED, - software_load='12.01', - target_load='12.01'): - """ - Create a host - """ - openstack_compute = False - personality = '' - if host_name.startswith('controller'): - personality = HOST_PERSONALITY.CONTROLLER - if cpe: - personality = personality + ',' + HOST_PERSONALITY.WORKER - elif host_name.startswith('compute'): - personality = HOST_PERSONALITY.WORKER - openstack_compute = True - elif host_name.startswith('storage'): - personality = HOST_PERSONALITY.STORAGE - else: - assert 0, "Invalid host_name: %s" % host_name - - nfvi_host = nfvi.objects.v1.Host( - uuid=str(uuid.uuid4()), - name=host_name, - personality=personality, - admin_state=admin_state, - oper_state=nfvi.objects.v1.HOST_OPER_STATE.ENABLED, - avail_status=nfvi.objects.v1.HOST_AVAIL_STATUS.AVAILABLE, - action=nfvi.objects.v1.HOST_ACTION.NONE, - software_load=software_load, - target_load=target_load, - openstack_compute=openstack_compute, - openstack_control=False, - remote_storage=False, - uptime='1000' - ) - - host = objects.Host(nfvi_host, - initial_state=host_fsm.HOST_STATE.ENABLED) - self._host_table[host.name] = host - - def create_host_group(self, name, members, policies): - """ - Create a host group - """ - member_uuids = [] - - for instance_uuid, instance in self._instance_table.items(): - if instance.name in members: - member_uuids.append(instance_uuid) - - nfvi_host_group = nfvi.objects.v1.HostGroup( - name=name, - member_names=members, - policies=policies - ) - - host_group = objects.HostGroup(nfvi_host_group) - self._host_group_table[host_group.name] = host_group - - def 
create_host_aggregate(self, name, host_names): - """ - Create a host aggregate - """ - nfvi_host_aggregate = nfvi.objects.v1.HostAggregate( - name=name, - host_names=host_names, - availability_zone='' - ) - - host_aggregate = objects.HostAggregate(nfvi_host_aggregate) - self._host_aggregate_table[host_aggregate.name] = host_aggregate +@mock.patch('nfv_vim.event_log._instance._event_issue', sw_update_testcase.fake_event_issue) +@mock.patch('nfv_vim.objects._sw_update.SwUpdate.save', sw_update_testcase.fake_save) +@mock.patch('nfv_vim.objects._sw_update.timers.timers_create_timer', sw_update_testcase.fake_timer) +@mock.patch('nfv_vim.nfvi.nfvi_compute_plugin_disabled', sw_update_testcase.fake_nfvi_compute_plugin_disabled) +class TestSwUpgradeStrategy(sw_update_testcase.SwUpdateStrategyTestCase): def create_sw_upgrade_strategy(self, storage_apply_type=SW_UPDATE_APPLY_TYPE.IGNORE, @@ -347,7 +54,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): return strategy @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_worker_stages_ignore(self): """ Test the sw_upgrade strategy add worker strategy stages: @@ -394,11 +101,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'total_stages': 0 } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_worker_stages_parallel_migrate_anti_affinity(self): """ Test the sw_upgrade strategy add worker strategy stages: @@ -493,11 +200,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_worker_stages_parallel_migrate_ten_hosts(self): """ Test the sw_upgrade strategy add worker strategy stages: @@ -617,11 +324,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_worker_stages_parallel_migrate_fifty_hosts(self): """ Test the sw_upgrade strategy add worker strategy stages: @@ -726,11 +433,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): } ) - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_worker_stages_serial_migrate(self): """ Test the sw_upgrade strategy add worker strategy stages: @@ -833,11 +540,11 @@ class 
TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_worker_stages_serial_migrate_locked_instance(self): """ Test the sw_upgrade strategy add worker strategy stages: @@ -882,7 +589,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): assert success is False, "Strategy creation did not fail" @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_storage_stages_ignore(self): """ Test the sw_upgrade strategy add storage strategy stages: @@ -925,11 +632,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'total_stages': 0 } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_storage_stages_parallel_host_group(self): """ Test the sw_upgrade strategy add storage strategy stages: @@ -1014,11 +721,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_storage_stages_serial(self): """ Test the sw_upgrade strategy add storage strategy stages: @@ -1118,11 +825,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_controller_stages_serial(self): """ Test the sw_upgrade strategy add controller strategy stages: @@ -1167,11 +874,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_controller_stages_serial_start_upgrade(self): """ Test the sw_upgrade strategy add controller strategy stages: @@ -1232,21 +939,21 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) 
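The sw_upgrade test cases above and below pin which controller the orchestration believes it is running on by patching get_local_host_name with a plain replacement function. A minimal, self-contained sketch of that decorator pattern; socket.gethostname is only a stand-in target here, since the real tests patch nfv_vim.strategy._strategy.get_local_host_name and import the standalone mock package rather than unittest.mock:

import socket
from unittest import mock


def fake_host_name_controller_1():
    return 'controller-1'


# mock.patch('<target>', <replacement>) swaps in the plain function for the
# duration of the decorated call, so no extra mock argument is passed in.
@mock.patch('socket.gethostname', fake_host_name_controller_1)
def build_from_local_host():
    return socket.gethostname()


assert build_from_local_host() == 'controller-1'  # patched inside the call
print(socket.gethostname())                        # real hostname again outside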
@mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) - def test_sw_upgrade_strategy_cpe_stages_serial(self): + sw_update_testcase.fake_host_name_controller_1) + def test_sw_upgrade_strategy_aio_stages_serial(self): """ Test the sw_upgrade strategy add controller strategy stages: - - cpe hosts + - aio hosts - serial apply Verify: - controller-0 upgraded """ - self.create_host('controller-0', cpe=True) - self.create_host('controller-1', cpe=True) + self.create_host('controller-0', aio=True) + self.create_host('controller-1', aio=True) controller_hosts = [] for host in self._host_table.values(): @@ -1261,7 +968,7 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): reboot=True) assert success is False, "Strategy creation did not fail" - assert reason == "cannot apply software upgrades to CPE configuration", \ + assert reason == "cannot apply software upgrades to AIO configuration", \ "Invalid failure reason" @testtools.skip('No support for start_upgrade') @@ -1419,11 +1126,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_build_complete_serial_migrate(self): """ Test the sw_upgrade strategy build_complete: @@ -1523,11 +1230,11 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): ] } - validate_strategy_persists(strategy) - validate_phase(apply_phase, expected_results) + sw_update_testcase.validate_strategy_persists(strategy) + sw_update_testcase.validate_phase(apply_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_build_complete_invalid_state(self): """ Test the sw_upgrade strategy build_complete: @@ -1569,10 +1276,10 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'result_reason': 'invalid upgrade state for orchestration: data-migration-complete' } - validate_phase(build_phase, expected_results) + sw_update_testcase.validate_phase(build_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_build_complete_no_upgrade_required(self): """ Test the sw_upgrade strategy build_complete: @@ -1610,10 +1317,10 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'result_reason': 'no upgrade in progress' } - validate_phase(build_phase, expected_results) + sw_update_testcase.validate_phase(build_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_build_complete_unupgraded_controller_1(self): """ Test the sw_upgrade strategy build_complete: @@ -1655,10 +1362,10 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'result_reason': 'invalid upgrade state for orchestration: data-migration-complete' } - validate_phase(build_phase, expected_results) + sw_update_testcase.validate_phase(build_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_0) + 
sw_update_testcase.fake_host_name_controller_0) def test_sw_upgrade_strategy_build_complete_from_controller_0(self): """ Test the sw_upgrade strategy build_complete: @@ -1701,10 +1408,10 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'to upgrade controller-0' } - validate_phase(build_phase, expected_results) + sw_update_testcase.validate_phase(build_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_build_complete_locked_controller(self): """ Test the sw_upgrade strategy build_complete: @@ -1747,10 +1454,10 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'result_reason': 'all controller hosts must be unlocked-enabled-available' } - validate_phase(build_phase, expected_results) + sw_update_testcase.validate_phase(build_phase, expected_results) @mock.patch('nfv_vim.strategy._strategy.get_local_host_name', - fake_host_name_controller_1) + sw_update_testcase.fake_host_name_controller_1) def test_sw_upgrade_strategy_build_complete_locked_worker(self): """ Test the sw_upgrade strategy build_complete: @@ -1793,4 +1500,4 @@ class TestSwUpgradeStrategy(testcase.NFVTestCase): 'result_reason': 'all worker hosts must be unlocked-enabled-available' } - validate_phase(build_phase, expected_results) + sw_update_testcase.validate_phase(build_phase, expected_results) diff --git a/nfv/nfv-vim/nfv_vim/alarm/_sw_update.py b/nfv/nfv-vim/nfv_vim/alarm/_sw_update.py index 163da63c..bdd7110b 100755 --- a/nfv/nfv-vim/nfv_vim/alarm/_sw_update.py +++ b/nfv/nfv-vim/nfv_vim/alarm/_sw_update.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -79,6 +79,41 @@ _alarm_templates = { "problem persists contact next level of support"), 'exclude_alarm_context': [alarm.ALARM_CONTEXT.TENANT], }, + + alarm.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_INPROGRESS: { + 'entity_type': "orchestration", + 'entity': "orchestration=fw-update", + 'event_type': alarm.ALARM_EVENT_TYPE.EQUIPMENT_ALARM, + 'severity': alarm.ALARM_SEVERITY.MAJOR, + 'probable_cause': alarm.ALARM_PROBABLE_CAUSE.UNKNOWN, + 'reason_text': "Firmware update auto-apply inprogress", + 'repair_action': ("Wait for firmware update auto-apply to complete; " + "if problem persists contact next level of support"), + 'exclude_alarm_context': [alarm.ALARM_CONTEXT.TENANT], + }, + alarm.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_ABORTING: { + 'entity_type': "orchestration", + 'entity': "orchestration=fw-update", + 'event_type': alarm.ALARM_EVENT_TYPE.EQUIPMENT_ALARM, + 'severity': alarm.ALARM_SEVERITY.MAJOR, + 'probable_cause': alarm.ALARM_PROBABLE_CAUSE.UNKNOWN, + 'reason_text': "Firmware update auto-apply aborting", + 'repair_action': ("Wait for firmware update auto-apply abort to " + "complete; if problem persists contact next " + "level of support"), + 'exclude_alarm_context': [alarm.ALARM_CONTEXT.TENANT], + }, + alarm.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_FAILED: { + 'entity_type': "orchestration", + 'entity': "orchestration=fw-update", + 'event_type': alarm.ALARM_EVENT_TYPE.EQUIPMENT_ALARM, + 'severity': alarm.ALARM_SEVERITY.CRITICAL, + 'probable_cause': alarm.ALARM_PROBABLE_CAUSE.UNKNOWN, + 'reason_text': "Firmware update auto-apply failed", + 'repair_action': ("Attempt to apply firmware update manually; if " + "problem persists contact next level of support"), + 'exclude_alarm_context': [alarm.ALARM_CONTEXT.TENANT], + }, } diff --git 
a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/_controller.py b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/_controller.py index 118f1290..18329d11 100755 --- a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/_controller.py +++ b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/_controller.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -10,6 +10,7 @@ from wsme import types as wsme_types import wsmeext.pecan as wsme_pecan from nfv_vim.api._link import Link +from nfv_vim.api.controllers.v1.orchestration.sw_update import FwUpdateAPI from nfv_vim.api.controllers.v1.orchestration.sw_update import SwPatchAPI from nfv_vim.api.controllers.v1.orchestration.sw_update import SwUpgradeAPI @@ -30,7 +31,8 @@ class OrchestrationDescription(wsme_types.Base): description.links = [ Link.make_link('self', url, 'orchestration'), Link.make_link('sw-patch', url, 'orchestration/sw-patch', ''), - Link.make_link('sw-upgrade', url, 'orchestration/sw-upgrade', '')] + Link.make_link('sw-upgrade', url, 'orchestration/sw-upgrade', ''), + Link.make_link('fw-update', url, 'orchestration/fw-update', '')] return description @@ -44,6 +46,8 @@ class OrchestrationAPI(rest.RestController): return SwPatchAPI(), remainder elif 'sw-upgrade' == key: return SwUpgradeAPI(), remainder + elif 'fw-update' == key: + return FwUpdateAPI(), remainder else: pecan.abort(httplib.NOT_FOUND) diff --git a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/__init__.py b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/__init__.py index 0da789f6..c4d000b5 100755 --- a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/__init__.py +++ b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/__init__.py @@ -1,7 +1,8 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # +from nfv_vim.api.controllers.v1.orchestration.sw_update._fw_update import FwUpdateAPI # noqa: F401 from nfv_vim.api.controllers.v1.orchestration.sw_update._sw_patch import SwPatchAPI # noqa: F401 from nfv_vim.api.controllers.v1.orchestration.sw_update._sw_upgrade import SwUpgradeAPI # noqa: F401 diff --git a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_fw_update.py b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_fw_update.py new file mode 100755 index 00000000..18c7ee09 --- /dev/null +++ b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_fw_update.py @@ -0,0 +1,54 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. 
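The OrchestrationAPI._lookup() routing above is what makes the new endpoints reachable under orchestration/fw-update. As a quick illustration (not part of the patch), a client can walk the two links the fw-update description advertises; the address is the sample one used in the api-ref examples and the use of the requests package is an assumption::

    import requests

    VIM_URL = "http://192.168.204.2:4545"   # sample address from the api-ref examples

    index = requests.get(VIM_URL + "/orchestration/fw-update/").json()
    print(index["id"])                       # "fw-update"
    for link in index["links"]:
        print(link["rel"], link["href"])     # "self" and "strategy" links
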
+# +# SPDX-License-Identifier: Apache-2.0 +# +import pecan +from pecan import rest +from six.moves import http_client as httplib +from wsme import types as wsme_types +import wsmeext.pecan as wsme_pecan + +from nfv_common import debug +from nfv_vim.api._link import Link +from nfv_vim.api.controllers.v1.orchestration.sw_update._sw_update_strategy import FwUpdateStrategyAPI + +DLOG = debug.debug_get_logger('nfv_vim.api.fw_update') + + +class FwUpdateDescription(wsme_types.Base): + """ + Firmware Update Description + """ + id = wsme_types.text + links = wsme_types.wsattr([Link], name='links') + + @classmethod + def convert(cls): + url = pecan.request.host_url + + description = FwUpdateDescription() + description.id = "fw-update" + description.links = [ + Link.make_link('self', url, 'orchestration/fw-update'), + Link.make_link('strategy', url, 'orchestration/fw-update/strategy')] + return description + + +class FwUpdateAPI(rest.RestController): + """ + FwUpdateRest API + """ + @pecan.expose() + def _lookup(self, key, *remainder): + if 'strategy' == key: + return FwUpdateStrategyAPI(), remainder + else: + pecan.abort(httplib.NOT_FOUND) + + @wsme_pecan.wsexpose(FwUpdateDescription) + def get(self): + # NOTE: The reason why convert() is being called for every + # request is because we need to get the host url from + # the request object to make the links. + return FwUpdateDescription.convert() diff --git a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_defs.py b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_defs.py index 74f6dcd5..9359c73a 100755 --- a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_defs.py +++ b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_defs.py @@ -1,4 +1,4 @@ -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -18,6 +18,7 @@ class SwUpdateNames(Constants): """ SW_PATCH = Constant('sw-patch') SW_UPGRADE = Constant('sw-upgrade') + FW_UPDATE = Constant('fw-update') @six.add_metaclass(Singleton) @@ -70,7 +71,8 @@ SW_UPDATE_ALARM_RESTRICTION_TYPES = SwUpdateAlarmRestrictionTypes() # WSME Types SwUpdateNames = wsme_types.Enum(str, SW_UPDATE_NAME.SW_PATCH, - SW_UPDATE_NAME.SW_UPGRADE) + SW_UPDATE_NAME.SW_UPGRADE, + SW_UPDATE_NAME.FW_UPDATE) SwUpdateApplyTypes = wsme_types.Enum(str, SW_UPDATE_APPLY_TYPE.SERIAL, SW_UPDATE_APPLY_TYPE.PARALLEL, diff --git a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_strategy.py b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_strategy.py index 9cf7f57f..e3e85d9a 100755 --- a/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_strategy.py +++ b/nfv/nfv-vim/nfv_vim/api/controllers/v1/orchestration/sw_update/_sw_update_strategy.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -27,12 +27,12 @@ from nfv_vim.api.controllers.v1.orchestration.sw_update._sw_update_defs import S from nfv_vim.api.controllers.v1.orchestration.sw_update._sw_update_defs import SwUpdateInstanceActionTypes from nfv_vim.api.controllers.v1.orchestration.sw_update._sw_update_defs import SwUpdateNames - DLOG = debug.debug_get_logger('nfv_vim.api.sw_update.strategy') MIN_PARALLEL_HOSTS = 2 MAX_PARALLEL_PATCH_HOSTS = 100 MAX_PARALLEL_UPGRADE_HOSTS = 10 +MAX_PARALLEL_FW_UPDATE_HOSTS = 5 def _get_sw_update_type_from_path(path): @@ -41,6 +41,8 @@ def _get_sw_update_type_from_path(path): return SW_UPDATE_NAME.SW_PATCH elif 'sw-upgrade' in split_path: return SW_UPDATE_NAME.SW_UPGRADE + elif 'fw-update' in split_path: + return SW_UPDATE_NAME.FW_UPDATE else: DLOG.error("Unknown sw_update_type in path: %s" % path) return 'unknown' @@ -178,6 +180,27 @@ class SwUpdateStrategyDeleteData(wsme_types.Base): default=False) +class FwUpdateStrategyCreateData(wsme_types.Base): + """ + Firmware Update Strategy - Create Data + """ + controller_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True, + name='controller-apply-type') + storage_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True, + name='storage-apply-type') + worker_apply_type = wsme_types.wsattr(SwUpdateApplyTypes, mandatory=True, + name='worker-apply-type') + max_parallel_worker_hosts = wsme_types.wsattr( + int, mandatory=False, name='max-parallel-worker-hosts') + default_instance_action = wsme_types.wsattr(SwUpdateInstanceActionTypes, + mandatory=True, + name='default-instance-action') + alarm_restrictions = wsme_types.wsattr( + SwUpdateAlarmRestrictionTypes, mandatory=False, + default=SW_UPDATE_ALARM_RESTRICTION_TYPES.STRICT, + name='alarm-restrictions') + + class SwUpdateStrategyActionData(wsme_types.Base): """ Software Update Strategy - Action Data @@ -560,3 +583,53 @@ class SwUpgradeStrategyAPI(SwUpdateStrategyAPI): DLOG.error("Unexpected result received, result=%s." 
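FwUpdateStrategyCreateData defines the request body for creating a fw-update strategy; the wsattr names above are the hyphenated keys a client sends. A sketch of one plausible body follows; the literal values ("ignore", "serial", "stop-start", "strict") are typical sw-manager values used here only for illustration::

    import json

    create_body = {
        "controller-apply-type": "ignore",        # mandatory
        "storage-apply-type": "ignore",           # mandatory
        "worker-apply-type": "serial",            # mandatory
        "default-instance-action": "stop-start",  # mandatory
        "max-parallel-worker-hosts": 2,           # optional, bounded 2..5 for fw-update
        "alarm-restrictions": "strict",           # optional, defaults to strict
    }
    print(json.dumps(create_body, indent=2))
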
% response.result) return pecan.abort(httplib.INTERNAL_SERVER_ERROR) + + +class FwUpdateStrategyAPI(SwUpdateStrategyAPI): + """ + Firmware Update Strategy Rest API + """ + @wsme_pecan.wsexpose(SwUpdateStrategyQueryData, + body=FwUpdateStrategyCreateData, + status_code=httplib.OK) + def post(self, request_data): + rpc_request = rpc.APIRequestCreateSwUpdateStrategy() + rpc_request.sw_update_type = _get_sw_update_type_from_path( + pecan.request.path) + rpc_request.controller_apply_type = request_data.controller_apply_type + rpc_request.storage_apply_type = request_data.storage_apply_type + rpc_request.worker_apply_type = request_data.worker_apply_type + if wsme_types.Unset != request_data.max_parallel_worker_hosts: + if request_data.max_parallel_worker_hosts < MIN_PARALLEL_HOSTS \ + or request_data.max_parallel_worker_hosts > \ + MAX_PARALLEL_FW_UPDATE_HOSTS: + return pecan.abort( + httplib.BAD_REQUEST, + "Invalid value for max-parallel-worker-hosts") + rpc_request.max_parallel_worker_hosts = \ + request_data.max_parallel_worker_hosts + rpc_request.default_instance_action = request_data.default_instance_action + rpc_request.alarm_restrictions = request_data.alarm_restrictions + vim_connection = pecan.request.vim.open_connection() + vim_connection.send(rpc_request.serialize()) + msg = vim_connection.receive(timeout_in_secs=30) + if msg is None: + DLOG.error("No response received.") + return pecan.abort(httplib.INTERNAL_SERVER_ERROR) + + response = rpc.RPCMessage.deserialize(msg) + if rpc.RPC_MSG_TYPE.CREATE_SW_UPDATE_STRATEGY_RESPONSE != response.type: + DLOG.error("Unexpected message type received, msg_type=%s." + % response.type) + return pecan.abort(httplib.INTERNAL_SERVER_ERROR) + + if rpc.RPC_MSG_RESULT.SUCCESS == response.result: + strategy = json.loads(response.strategy) + query_data = SwUpdateStrategyQueryData() + query_data.convert_strategy(strategy) + return query_data + elif rpc.RPC_MSG_RESULT.CONFLICT == response.result: + return pecan.abort(httplib.CONFLICT) + + DLOG.error("Unexpected result received, result=%s." % response.result) + return pecan.abort(httplib.INTERNAL_SERVER_ERROR) diff --git a/nfv/nfv-vim/nfv_vim/database/_database_sw_update.py b/nfv/nfv-vim/nfv_vim/database/_database_sw_update.py index 91889962..d107517f 100755 --- a/nfv/nfv-vim/nfv_vim/database/_database_sw_update.py +++ b/nfv/nfv-vim/nfv_vim/database/_database_sw_update.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016,2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -66,4 +66,7 @@ def database_sw_update_get_list(): elif objects.SW_UPDATE_TYPE.SW_UPGRADE == sw_update.sw_update_type: sw_upgrade_obj = objects.SwUpgrade(sw_update.uuid, strategy_data) sw_update_objs.append(sw_upgrade_obj) + elif objects.SW_UPDATE_TYPE.FW_UPDATE == sw_update.sw_update_type: + fw_update_obj = objects.FwUpdate(sw_update.uuid, strategy_data) + sw_update_objs.append(fw_update_obj) return sw_update_objs diff --git a/nfv/nfv-vim/nfv_vim/debug.ini b/nfv/nfv-vim/nfv_vim/debug.ini index c7a404e6..0ba78e27 100644 --- a/nfv/nfv-vim/nfv_vim/debug.ini +++ b/nfv/nfv-vim/nfv_vim/debug.ini @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
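The post() handler rejects a max-parallel-worker-hosts value outside the range [MIN_PARALLEL_HOSTS, MAX_PARALLEL_FW_UPDATE_HOSTS], i.e. 2..5 for firmware update. A stand-alone restatement of that bounds check::

    MIN_PARALLEL_HOSTS = 2
    MAX_PARALLEL_FW_UPDATE_HOSTS = 5

    def max_parallel_worker_hosts_is_valid(value):
        """True when the requested parallelism is accepted by the API."""
        return MIN_PARALLEL_HOSTS <= value <= MAX_PARALLEL_FW_UPDATE_HOSTS

    assert max_parallel_worker_hosts_is_valid(2)
    assert max_parallel_worker_hosts_is_valid(5)
    assert not max_parallel_worker_hosts_is_valid(1)    # 400 Bad Request
    assert not max_parallel_worker_hosts_is_valid(100)  # 400 Bad Request
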
# # SPDX-License-Identifier: Apache-2.0 # @@ -122,6 +122,7 @@ nfv_vim.objects.volume: debug.level.verbose nfv_vim.objects.guest_services: debug.level.verbose nfv_vim.objects.host_group: debug.level.verbose nfv_vim.objects.instance_group: debug.level.verbose +nfv_vim.objects.fw_update: debug.level.info nfv_vim.objects.sw_update: debug.level.verbose nfv_vim.objects.sw_patch: debug.level.verbose nfv_vim.objects.sw_upgrade: debug.level.verbose diff --git a/nfv/nfv-vim/nfv_vim/directors/_directors_defs.py b/nfv/nfv-vim/nfv_vim/directors/_directors_defs.py index d59bc3e5..b100a033 100755 --- a/nfv/nfv-vim/nfv_vim/directors/_directors_defs.py +++ b/nfv/nfv-vim/nfv_vim/directors/_directors_defs.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -25,6 +25,8 @@ class OperationTypes(Constants): REBOOT_HOSTS = Constant('reboot-hosts') UPGRADE_HOSTS = Constant('upgrade-hosts') SWACT_HOSTS = Constant('swact-hosts') + FW_UPDATE_HOSTS = Constant('fw-update-hosts') + FW_UPDATE_ABORT_HOSTS = Constant('fw-update-abort-hosts') START_INSTANCES = Constant('start-instances') START_INSTANCES_SERIAL = Constant('start-instances-serial') STOP_INSTANCES = Constant('stop-instances') diff --git a/nfv/nfv-vim/nfv_vim/directors/_host_director.py b/nfv/nfv-vim/nfv_vim/directors/_host_director.py index 226c81f3..4e646bce 100755 --- a/nfv/nfv-vim/nfv_vim/directors/_host_director.py +++ b/nfv/nfv-vim/nfv_vim/directors/_host_director.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -338,6 +338,80 @@ class HostDirector(object): nfvi.nfvi_swact_from_host(host_uuid, host_name, self._nfvi_swact_host_callback()) + @coroutine + def _nfvi_fw_update_host_callback(self): + """ + NFVI Firmware Update Host Callback + """ + from nfv_vim import directors + + response = (yield) + DLOG.verbose("NFVI Firmware Update Host callback response=%s." % response) + if not response['completed']: + DLOG.info("Firmware Image Update for host %s failed, reason=%s." + % (response['host_name'], response['reason'])) + + host_table = tables.tables_get_host_table() + host = host_table.get(response['host_name'], None) + if host is None: + DLOG.verbose("Host %s does not exist." % response['host_name']) + return + + if self._host_operation is None: + DLOG.verbose("No host %s operation inprogress." % host.name) + return + + if OPERATION_TYPE.FW_UPDATE_HOSTS != self._host_operation.operation_type: + DLOG.verbose("Unexpected host %s operation %s, ignoring." + % (host.name, self._host_operation.operation_type)) + return + + sw_mgmt_director = directors.get_sw_mgmt_director() + sw_mgmt_director.host_fw_update_failed(host) + + def _nfvi_fw_update_host(self, host_uuid, host_name): + """ + NFVI Firmware Image Update Host + """ + nfvi.nfvi_host_device_image_update(host_uuid, host_name, self._nfvi_fw_update_host_callback()) + + @coroutine + def _nfvi_fw_update_abort_callback(self): + """ + NFVI Abort Firmware Update callback + """ + from nfv_vim import directors + + response = (yield) + DLOG.verbose("NFVI Abort Firmware Update callback response=%s." % response) + if not response['completed']: + DLOG.info("Get Host Devices for host %s failed, reason=%s." 
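The _nfvi_fw_update_host_callback() above follows the VIM's generator-based callback convention: the decorated function is primed, then later resumed with the NFVI response via send(). A self-contained sketch of that pattern; the coroutine decorator below is only a stand-in for nfv_common.helpers.coroutine and the response dict is made up::

    def coroutine(func):
        """Stand-in for nfv_common.helpers.coroutine: primes the generator."""
        def wrapper(*args, **kwargs):
            gen = func(*args, **kwargs)
            next(gen)                     # advance to the first (yield)
            return gen
        return wrapper

    @coroutine
    def fw_update_callback():
        response = (yield)                # delivered later via gen.send(response)
        if not response['completed']:
            print("fw update failed on %s: %s"
                  % (response['host_name'], response['reason']))

    cb = fw_update_callback()
    try:
        cb.send({'completed': False, 'host_name': 'worker-0', 'reason': 'timeout'})
    except StopIteration:
        pass                              # callback body ran to completion
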
+ % (response['host_name'], response['reason'])) + + host_table = tables.tables_get_host_table() + host = host_table.get(response['host_name'], None) + if host is None: + DLOG.verbose("Host %s does not exist." % response['host_name']) + return + + if self._host_operation is None: + DLOG.verbose("No host %s operation inprogress." % host.name) + return + + if OPERATION_TYPE.FW_UPDATE_ABORT_HOSTS != self._host_operation.operation_type: + DLOG.verbose("Unexpected host %s operation %s, ignoring." + % (host.name, self._host_operation.operation_type)) + return + + sw_mgmt_director = directors.get_sw_mgmt_director() + sw_mgmt_director.host_fw_update_abort_failed(host) + + def _nfvi_fw_update_abort_host(self, host_uuid, host_name): + """ + NFVI Abort Firmware Update + """ + nfvi.nfvi_host_device_image_update_abort(host_uuid, host_name, self._nfvi_fw_update_abort_callback()) + def host_operation_inprogress(self): """ Returns true if a lock of hosts @@ -665,6 +739,70 @@ class HostDirector(object): return host_operation + def fw_update_hosts(self, host_names): + """ + Firmware Update hosts + """ + DLOG.info("Firmware Update hosts: %s" % host_names) + + host_operation = Operation(OPERATION_TYPE.FW_UPDATE_HOSTS) + + if self._host_operation is not None: + DLOG.debug("Canceling previous host operation %s, before " + "continuing with host operation %s." + % (self._host_operation.operation_type, + host_operation.operation_type)) + self._host_operation = None + + host_table = tables.tables_get_host_table() + for host_name in host_names: + host = host_table.get(host_name, None) + if host is None: + reason = "Unknown host %s given." % host_name + DLOG.info(reason) + host_operation.set_failed(reason) + return host_operation + + host_operation.add_host(host.name, OPERATION_STATE.INPROGRESS) + self._nfvi_fw_update_host(host.uuid, host.name) + + if host_operation.is_inprogress(): + self._host_operation = host_operation + + return host_operation + + def fw_update_abort_hosts(self, host_names): + """ + Firmware Update Abort Hosts + """ + DLOG.info("Firmware Update Abort for hosts: %s" % host_names) + + host_operation = Operation(OPERATION_TYPE.FW_UPDATE_ABORT_HOSTS) + + if self._host_operation is not None: + DLOG.debug("Canceling previous host operation %s, before " + "continuing with host operation %s." + % (self._host_operation.operation_type, + host_operation.operation_type)) + self._host_operation = None + + host_table = tables.tables_get_host_table() + for host_name in host_names: + host = host_table.get(host_name, None) + if host is None: + reason = "Unknown host %s given." % host_name + DLOG.info(reason) + host_operation.set_failed(reason) + return host_operation + + host_operation.add_host(host.name, OPERATION_STATE.INPROGRESS) + self._nfvi_fw_update_abort_host(host.uuid, host.name) + + if host_operation.is_inprogress(): + self._host_operation = host_operation + + return host_operation + def disable_host_services(self, host_names, service): """ Disable a host service on a list of hosts diff --git a/nfv/nfv-vim/nfv_vim/directors/_sw_mgmt_director.py b/nfv/nfv-vim/nfv_vim/directors/_sw_mgmt_director.py index f14f5444..8ee384bc 100755 --- a/nfv/nfv-vim/nfv_vim/directors/_sw_mgmt_director.py +++ b/nfv/nfv-vim/nfv_vim/directors/_sw_mgmt_director.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
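fw_update_hosts() returns an Operation whose state the caller inspects. A hedged usage sketch, mirroring how FwUpdateHostsStep.apply() later in this patch consumes it; the host names are hypothetical and the import assumes the VIM runtime is available::

    from nfv_vim import directors

    host_director = directors.get_host_director()
    operation = host_director.fw_update_hosts(['worker-0', 'worker-1'])

    if operation.is_inprogress():
        pass    # the strategy step waits for audit / failure events
    elif operation.is_failed():
        print("firmware update could not be started: %s" % operation.reason)
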
# # SPDX-License-Identifier: Apache-2.0 # @@ -101,6 +101,38 @@ class SwMgmtDirector(object): self._sw_update.strategy) return strategy_uuid, '' + def create_fw_update_strategy(self, + controller_apply_type, + storage_apply_type, + worker_apply_type, + max_parallel_worker_hosts, + default_instance_action, + alarm_restrictions, + callback): + """ + Create Firmware Update Strategy + """ + strategy_uuid = str(uuid.uuid4()) + + if self._sw_update is not None: + # Do not schedule the callback - if creation failed because a + # strategy already exists, the callback will attempt to operate + # on the old strategy, which is not what we want. + reason = "strategy already exists" + return None, reason + + self._sw_update = objects.FwUpdate() + success, reason = self._sw_update.strategy_build( + strategy_uuid, controller_apply_type, + storage_apply_type, + worker_apply_type, max_parallel_worker_hosts, + default_instance_action, alarm_restrictions, + self._ignore_alarms, self._single_controller) + + schedule.schedule_function_call(callback, success, reason, + self._sw_update.strategy) + return strategy_uuid, '' + def apply_sw_update_strategy(self, strategy_uuid, stage_id, callback): """ Apply Software Update Strategy @@ -196,6 +228,22 @@ class SwMgmtDirector(object): self._sw_update.handle_event( strategy.STRATEGY_EVENT.HOST_UPGRADE_FAILED, host) + def host_fw_update_abort_failed(self, host): + """ + Called when firmware update abort for a host failed + """ + if self._sw_update is not None: + self._sw_update.handle_event( + strategy.STRATEGY_EVENT.HOST_FW_UPDATE_ABORT_FAILED, host) + + def host_fw_update_failed(self, host): + """ + Called when a firmware update of a host failed + """ + if self._sw_update is not None: + self._sw_update.handle_event( + strategy.STRATEGY_EVENT.HOST_FW_UPDATE_FAILED, host) + def host_audit(self, host): """ Called when a host audit is to be performed diff --git a/nfv/nfv-vim/nfv_vim/events/_vim_sw_update_api_events.py b/nfv/nfv-vim/nfv_vim/events/_vim_sw_update_api_events.py index 64b6a629..a3bc1f43 100755 --- a/nfv/nfv-vim/nfv_vim/events/_vim_sw_update_api_events.py +++ b/nfv/nfv-vim/nfv_vim/events/_vim_sw_update_api_events.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # @@ -106,6 +106,13 @@ def vim_sw_update_api_create_strategy(connection, msg): alarm_restrictions, start_upgrade, complete_upgrade, _vim_sw_update_api_create_strategy_callback) + elif 'fw-update' == msg.sw_update_type: + uuid, reason = sw_mgmt_director.create_fw_update_strategy( + controller_apply_type, storage_apply_type, + worker_apply_type, max_parallel_worker_hosts, + default_instance_action, + alarm_restrictions, + _vim_sw_update_api_create_strategy_callback) else: DLOG.error("Invalid message name: %s" % msg.sw_update_type) response = rpc.APIResponseCreateSwUpdateStrategy() @@ -162,6 +169,8 @@ def vim_sw_update_api_apply_strategy(connection, msg): sw_update_type = objects.SW_UPDATE_TYPE.SW_PATCH elif 'sw-upgrade' == msg.sw_update_type: sw_update_type = objects.SW_UPDATE_TYPE.SW_UPGRADE + elif 'fw-update' == msg.sw_update_type: + sw_update_type = objects.SW_UPDATE_TYPE.FW_UPDATE else: DLOG.error("Invalid message name: %s" % msg.sw_update_type) sw_update_type = 'unknown' @@ -215,6 +224,8 @@ def vim_sw_update_api_abort_strategy(connection, msg): sw_update_type = objects.SW_UPDATE_TYPE.SW_PATCH elif 'sw-upgrade' == msg.sw_update_type: sw_update_type = objects.SW_UPDATE_TYPE.SW_UPGRADE + elif 'fw-update' == msg.sw_update_type: + sw_update_type = objects.SW_UPDATE_TYPE.FW_UPDATE else: DLOG.error("Invalid message name: %s" % msg.sw_update_type) sw_update_type = 'unknown' @@ -265,6 +276,8 @@ def vim_sw_update_api_delete_strategy(connection, msg): sw_update_type = objects.SW_UPDATE_TYPE.SW_PATCH elif 'sw-upgrade' == msg.sw_update_type: sw_update_type = objects.SW_UPDATE_TYPE.SW_UPGRADE + elif 'fw-update' == msg.sw_update_type: + sw_update_type = objects.SW_UPDATE_TYPE.FW_UPDATE else: DLOG.error("Invalid message name: %s" % msg.sw_update_type) sw_update_type = 'unknown' @@ -294,6 +307,8 @@ def vim_sw_update_api_get_strategy(connection, msg): sw_update_type = objects.SW_UPDATE_TYPE.SW_PATCH elif 'sw-upgrade' == msg.sw_update_type: sw_update_type = objects.SW_UPDATE_TYPE.SW_UPGRADE + elif 'fw-update' == msg.sw_update_type: + sw_update_type = objects.SW_UPDATE_TYPE.FW_UPDATE else: DLOG.error("Invalid message name: %s" % msg.sw_update_type) sw_update_type = 'unknown' diff --git a/nfv/nfv-vim/nfv_vim/nfvi/__init__.py b/nfv/nfv-vim/nfv_vim/nfvi/__init__.py index be71e92e..5b945441 100755 --- a/nfv/nfv-vim/nfv_vim/nfvi/__init__.py +++ b/nfv/nfv-vim/nfv_vim/nfvi/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
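Each API event handler above gains an identical 'fw-update' elif branch. A table-driven equivalent (shown only as an alternative sketch, not the approach taken in this patch) expresses the same mapping; the string results stand in for the objects.SW_UPDATE_TYPE constants::

    SW_UPDATE_TYPE_BY_NAME = {
        'sw-patch': 'SW_PATCH',
        'sw-upgrade': 'SW_UPGRADE',
        'fw-update': 'FW_UPDATE',
    }

    def sw_update_type_from_name(name):
        # Fall back to 'unknown', matching the handlers' else branches.
        return SW_UPDATE_TYPE_BY_NAME.get(name, 'unknown')

    assert sw_update_type_from_name('fw-update') == 'FW_UPDATE'
    assert sw_update_type_from_name('bogus') == 'unknown'
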
# # SPDX-License-Identifier: Apache-2.0 # @@ -97,12 +97,16 @@ from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_alarm_history # n from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_alarms # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_datanetworks # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_host # noqa: F401 +from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_host_device # noqa: F401 +from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_host_devices # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_hosts # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_logs # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_system_info # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_system_state # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_terminating_pods # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_upgrade # noqa: F401 +from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_host_device_image_update # noqa: F401 +from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_host_device_image_update_abort # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_lock_host # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_notify_host_failed # noqa: F401 from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_notify_host_services_delete_failed # noqa: F401 diff --git a/nfv/nfv-vim/nfv_vim/nfvi/_nfvi_infrastructure_module.py b/nfv/nfv-vim/nfv_vim/nfvi/_nfvi_infrastructure_module.py index d882f1a5..630aea76 100755 --- a/nfv/nfv-vim/nfv_vim/nfvi/_nfvi_infrastructure_module.py +++ b/nfv/nfv-vim/nfv_vim/nfvi/_nfvi_infrastructure_module.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -59,6 +59,47 @@ def nfvi_get_host(host_uuid, host_name, callback): return cmd_id +def nfvi_get_host_devices(host_uuid, host_name, callback): + """ + Get host device list details + """ + cmd_id = _infrastructure_plugin.invoke_plugin('get_host_devices', + host_uuid, host_name, + callback=callback) + return cmd_id + + +def nfvi_get_host_device(host_uuid, host_name, device_uuid, device_name, callback): + """ + Get host device details + """ + cmd_id = _infrastructure_plugin.invoke_plugin('get_host_device', + host_uuid, host_name, + device_uuid, device_name, + callback=callback) + return cmd_id + + +def nfvi_host_device_image_update(host_uuid, host_name, callback): + """ + Update host device image + """ + cmd_id = _infrastructure_plugin.invoke_plugin('host_device_image_update', + host_uuid, host_name, + callback=callback) + return cmd_id + + +def nfvi_host_device_image_update_abort(host_uuid, host_name, callback): + """ + Abort host device image update + """ + cmd_id = _infrastructure_plugin.invoke_plugin('host_device_image_update_abort', + host_uuid, host_name, + callback=callback) + return cmd_id + + def nfvi_get_upgrade(callback): """ Get upgrade diff --git a/nfv/nfv-vim/nfv_vim/nfvi/objects/v1/__init__.py b/nfv/nfv-vim/nfv_vim/nfvi/objects/v1/__init__.py index dacc842c..16dca43a 100755 --- a/nfv/nfv-vim/nfv_vim/nfvi/objects/v1/__init__.py +++ b/nfv/nfv-vim/nfv_vim/nfvi/objects/v1/__init__.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
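The new wrappers each invoke the infrastructure plugin with a host and a callback. A hedged sketch of calling nfvi_get_host_devices() directly with a coroutine callback; the uuid and host name are placeholders, and the 'result-data'/'pci_devices' layout follows the strategy-step callbacks later in this patch::

    from nfv_common.helpers import coroutine
    from nfv_vim import nfvi

    @coroutine
    def on_host_devices():
        response = (yield)
        if response['completed']:
            for dev in response['result-data'].get('pci_devices', []):
                print(dev.get('name'), dev.get('needs_firmware_update'))
        else:
            print("query failed: %s" % response.get('reason'))

    nfvi.nfvi_get_host_devices('host-uuid', 'worker-0', on_host_devices())
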
# # SPDX-License-Identifier: Apache-2.0 # @@ -18,6 +18,7 @@ from nfv_vim.nfvi.objects.v1._host import HOST_LABEL_VALUES # noqa: F401 from nfv_vim.nfvi.objects.v1._host import HOST_NOTIFICATIONS # noqa: F401 from nfv_vim.nfvi.objects.v1._host import HOST_OPER_STATE # noqa: F401 from nfv_vim.nfvi.objects.v1._host_aggregate import HostAggregate # noqa: F401 +from nfv_vim.nfvi.objects.v1._host_fw_update import HostFwUpdate # noqa: F401 from nfv_vim.nfvi.objects.v1._host_group import HOST_GROUP_POLICY # noqa: F401 from nfv_vim.nfvi.objects.v1._host_group import HostGroup # noqa: F401 from nfv_vim.nfvi.objects.v1._host_sw_patch import HostSwPatch # noqa: F401 diff --git a/nfv/nfv-vim/nfv_vim/nfvi/objects/v1/_host_fw_update.py b/nfv/nfv-vim/nfv_vim/nfvi/objects/v1/_host_fw_update.py new file mode 100755 index 00000000..2b3e7970 --- /dev/null +++ b/nfv/nfv-vim/nfv_vim/nfvi/objects/v1/_host_fw_update.py @@ -0,0 +1,17 @@ +# +# Copyright (c) 2015-2020 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +from nfv_vim.nfvi.objects.v1._object import ObjectData + + +class HostFwUpdate(ObjectData): + """ + NFVI Host Firmware Update Object + """ + def __init__(self, hostname, personality, uuid): + super(HostFwUpdate, self).__init__('1.0.0') + self.update(dict(hostname=hostname, + personality=personality, + uuid=uuid)) diff --git a/nfv/nfv-vim/nfv_vim/objects/__init__.py b/nfv/nfv-vim/nfv_vim/objects/__init__.py index 5ed5c29d..edd69f72 100755 --- a/nfv/nfv-vim/nfv_vim/objects/__init__.py +++ b/nfv/nfv-vim/nfv_vim/objects/__init__.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # +from nfv_vim.objects._fw_update import FwUpdate # noqa: F401 from nfv_vim.objects._guest_services import GuestServices # noqa: F401 from nfv_vim.objects._host import Host # noqa: F401 from nfv_vim.objects._host import HOST_NAME # noqa: F401 diff --git a/nfv/nfv-vim/nfv_vim/objects/_fw_update.py b/nfv/nfv-vim/nfv_vim/objects/_fw_update.py new file mode 100644 index 00000000..34efbe12 --- /dev/null +++ b/nfv/nfv-vim/nfv_vim/objects/_fw_update.py @@ -0,0 +1,187 @@ +# +# Copyright (c) 2020 Wind River Systems, Inc. 
+# +# SPDX-License-Identifier: Apache-2.0 +# +from nfv_common import debug + +from nfv_common.helpers import coroutine + +from nfv_vim import alarm +from nfv_vim import event_log +from nfv_vim import nfvi + +from nfv_vim.objects._sw_update import SW_UPDATE_ALARM_TYPES +from nfv_vim.objects._sw_update import SW_UPDATE_EVENT_IDS +from nfv_vim.objects._sw_update import SW_UPDATE_TYPE +from nfv_vim.objects._sw_update import SwUpdate + +DLOG = debug.debug_get_logger('nfv_vim.objects.fw_update') + + +class FwUpdate(SwUpdate): + """ + Firmware Update Object + """ + def __init__(self, sw_update_uuid=None, strategy_data=None): + super(FwUpdate, self).__init__(sw_update_type=SW_UPDATE_TYPE.FW_UPDATE, + sw_update_uuid=sw_update_uuid, + strategy_data=strategy_data) + + self._fw_update_hosts = list() + + def strategy_build(self, strategy_uuid, controller_apply_type, + storage_apply_type, worker_apply_type, + max_parallel_worker_hosts, + default_instance_action, alarm_restrictions, + ignore_alarms, + single_controller): + """ + Create a firmware update strategy + """ + from nfv_vim import strategy + + if self._strategy: + reason = "strategy already exists" + return False, reason + + self._strategy = strategy.FwUpdateStrategy( + strategy_uuid, controller_apply_type, storage_apply_type, + worker_apply_type, max_parallel_worker_hosts, + default_instance_action, + alarm_restrictions, ignore_alarms, + single_controller) + + self._strategy.sw_update_obj = self + self._strategy.build() + self._persist() + return True, '' + + def strategy_build_complete(self, success, reason): + """ + Creation of a firmware update strategy complete + """ + DLOG.info("Firmware update strategy build complete.") + pass + + @staticmethod + def alarm_type(alarm_type): + """ + Returns ALARM_TYPE corresponding to SW_UPDATE_ALARM_TYPES + """ + ALARM_TYPE_MAPPING = { + SW_UPDATE_ALARM_TYPES.APPLY_INPROGRESS: + alarm.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_INPROGRESS, + SW_UPDATE_ALARM_TYPES.APPLY_ABORTING: + alarm.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_ABORTING, + SW_UPDATE_ALARM_TYPES.APPLY_FAILED: + alarm.ALARM_TYPE.FW_UPDATE_AUTO_APPLY_FAILED, + } + return ALARM_TYPE_MAPPING[alarm_type] + + @staticmethod + def event_id(event_id): + """ + Returns EVENT_ID corresponding to SW_UPDATE_EVENT_IDS + """ + EVENT_ID_MAPPING = { + SW_UPDATE_EVENT_IDS.APPLY_START: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_START, + SW_UPDATE_EVENT_IDS.APPLY_INPROGRESS: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_INPROGRESS, + SW_UPDATE_EVENT_IDS.APPLY_REJECTED: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_REJECTED, + SW_UPDATE_EVENT_IDS.APPLY_CANCELLED: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_CANCELLED, + SW_UPDATE_EVENT_IDS.APPLY_FAILED: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_FAILED, + SW_UPDATE_EVENT_IDS.APPLY_COMPLETED: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_COMPLETED, + SW_UPDATE_EVENT_IDS.APPLY_ABORT: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORT, + SW_UPDATE_EVENT_IDS.APPLY_ABORTING: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORTING, + SW_UPDATE_EVENT_IDS.APPLY_ABORT_REJECTED: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORT_REJECTED, + SW_UPDATE_EVENT_IDS.APPLY_ABORT_FAILED: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORT_FAILED, + SW_UPDATE_EVENT_IDS.APPLY_ABORTED: + event_log.EVENT_ID.FW_UPDATE_AUTO_APPLY_ABORTED, + } + return EVENT_ID_MAPPING[event_id] + + def nfvi_update(self): + """ + NFVI Update + """ + if self._strategy is None: + if self._alarms: + alarm.clear_sw_update_alarm(self._alarms) + return False + + if self.strategy.is_applying(): + if not 
self._alarms: + self._alarms = \ + alarm.raise_sw_update_alarm( + self.alarm_type(SW_UPDATE_ALARM_TYPES.APPLY_INPROGRESS)) + + event_log.sw_update_issue_log( + self.event_id(SW_UPDATE_EVENT_IDS.APPLY_INPROGRESS)) + + elif self.strategy.is_apply_failed() or self.strategy.is_apply_timed_out(): + for fw_update_host in self._fw_update_hosts: + if not self._alarms: + self._alarms = \ + alarm.raise_sw_update_alarm( + self.alarm_type(SW_UPDATE_ALARM_TYPES.APPLY_FAILED)) + + event_log.sw_update_issue_log( + self.event_id(SW_UPDATE_EVENT_IDS.APPLY_FAILED)) + break + + else: + if self._alarms: + alarm.clear_sw_update_alarm(self._alarms) + return False + + elif self.strategy.is_aborting(): + if not self._alarms: + self._alarms = \ + alarm.raise_sw_update_alarm( + self.alarm_type(SW_UPDATE_ALARM_TYPES.APPLY_ABORTING)) + + event_log.sw_update_issue_log( + self.event_id(SW_UPDATE_EVENT_IDS.APPLY_ABORTING)) + + else: + if self._alarms: + alarm.clear_sw_update_alarm(self._alarms) + return False + + return True + + @coroutine + def nfvi_audit(self): + """ + Audit NFVI layer + """ + while True: + timer_id = (yield) + + DLOG.debug("Audit alarms, timer_id=%s." % timer_id) + self.nfvi_alarms_clear() + nfvi.nfvi_get_alarms(self.nfvi_alarms_callback(timer_id)) + if not nfvi.nfvi_fault_mgmt_plugin_disabled(): + nfvi.nfvi_get_openstack_alarms(self.nfvi_alarms_callback(timer_id)) + self._nfvi_audit_inprogress = True + while self._nfvi_audit_inprogress: + timer_id = (yield) + + if not self.nfvi_update(): + DLOG.info("Audit no longer needed.") + break + + DLOG.verbose("Audit firmware update still running, timer_id=%s." % + timer_id) + + self._nfvi_timer_id = None diff --git a/nfv/nfv-vim/nfv_vim/objects/_sw_update.py b/nfv/nfv-vim/nfv_vim/objects/_sw_update.py index 500e6524..1eee2083 100755 --- a/nfv/nfv-vim/nfv_vim/objects/_sw_update.py +++ b/nfv/nfv-vim/nfv_vim/objects/_sw_update.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2016 Wind River Systems, Inc. +# Copyright (c) 2016-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -29,6 +29,7 @@ class SwUpdateTypes(Constants): """ SW_PATCH = Constant('sw-patch') SW_UPGRADE = Constant('sw-upgrade') + FW_UPDATE = Constant('fw-update') @six.add_metaclass(Singleton) diff --git a/nfv/nfv-vim/nfv_vim/strategy/__init__.py b/nfv/nfv-vim/nfv_vim/strategy/__init__.py index 916f01c7..01f340af 100755 --- a/nfv/nfv-vim/nfv_vim/strategy/__init__.py +++ b/nfv/nfv-vim/nfv_vim/strategy/__init__.py @@ -1,18 +1,22 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
# # SPDX-License-Identifier: Apache-2.0 # from nfv_common.strategy import * # noqa: F401,F403 +from nfv_vim.strategy._strategy import FwUpdateStrategy # noqa: F401 from nfv_vim.strategy._strategy import strategy_rebuild_from_dict # noqa: F401 from nfv_vim.strategy._strategy import SwPatchStrategy # noqa: F401 from nfv_vim.strategy._strategy import SwUpgradeStrategy # noqa: F401 from nfv_vim.strategy._strategy_defs import STRATEGY_EVENT # noqa: F401 from nfv_vim.strategy._strategy_stages import STRATEGY_STAGE_NAME # noqa: F401 from nfv_vim.strategy._strategy_steps import DisableHostServicesStep # noqa: F401 +from nfv_vim.strategy._strategy_steps import FwUpdateAbortHostsStep # noqa: F401 +from nfv_vim.strategy._strategy_steps import FwUpdateHostsStep # noqa: F401 from nfv_vim.strategy._strategy_steps import LockHostsStep # noqa: F401 from nfv_vim.strategy._strategy_steps import MigrateInstancesStep # noqa: F401 from nfv_vim.strategy._strategy_steps import QueryAlarmsStep # noqa: F401 +from nfv_vim.strategy._strategy_steps import QueryHostDeviceListStep # noqa: F401 from nfv_vim.strategy._strategy_steps import QuerySwPatchesStep # noqa: F401 from nfv_vim.strategy._strategy_steps import QuerySwPatchHostsStep # noqa: F401 from nfv_vim.strategy._strategy_steps import QueryUpgradeStep # noqa: F401 diff --git a/nfv/nfv-vim/nfv_vim/strategy/_strategy.py b/nfv/nfv-vim/nfv_vim/strategy/_strategy.py index e19da9c2..054338f4 100755 --- a/nfv/nfv-vim/nfv_vim/strategy/_strategy.py +++ b/nfv/nfv-vim/nfv_vim/strategy/_strategy.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # +import copy import six import weakref @@ -32,6 +33,7 @@ class StrategyNames(Constants): """ SW_PATCH = Constant('sw-patch') SW_UPGRADE = Constant('sw-upgrade') + FW_UPDATE = Constant('fw-update') # Constant Instantiation @@ -45,6 +47,11 @@ MTCE_DELAY = 15 NO_REBOOT_DELAY = 30 +################################################################### +# +# The Software Update Strategy ; Base Class +# +################################################################### class SwUpdateStrategy(strategy.Strategy): """ Software Update - Strategy @@ -63,9 +70,12 @@ class SwUpdateStrategy(strategy.Strategy): self._max_parallel_worker_hosts = max_parallel_worker_hosts self._default_instance_action = default_instance_action self._alarm_restrictions = alarm_restrictions - self._ignore_alarms = ignore_alarms self._sw_update_obj_reference = None + # The ignore_alarms is a list that needs to get a copy + # to avoid inadvertently modifying the input list by + # subclass service strategies. 
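The new comment explains why the base class now copies ignore_alarms: subclass strategies (FwUpdateStrategy below) append their own alarm IDs. A tiny stand-alone illustration of the aliasing this avoids; '100.101' is just a made-up alarm ID::

    import copy

    caller_ignore_alarms = ['100.101']

    aliased = caller_ignore_alarms              # no copy: same list object
    aliased += ['900.301']                      # the caller's list silently grows
    assert caller_ignore_alarms == ['100.101', '900.301']

    caller_ignore_alarms = ['100.101']
    copied = copy.copy(caller_ignore_alarms)    # what the base class now does
    copied += ['900.301']
    assert caller_ignore_alarms == ['100.101']  # the caller's list is untouched
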
+ self._ignore_alarms = copy.copy(ignore_alarms) self._nfvi_alarms = list() @property @@ -339,7 +349,7 @@ class SwUpdateStrategy(strategy.Strategy): host_lists = controller_list + host_lists else: - DLOG.verbose("Compute apply type set to ignore.") + DLOG.verbose("Worker apply type set to ignore.") # Drop empty lists and enforce a maximum number of hosts to be updated # at once (only required list of workers with no instances, as we @@ -469,6 +479,11 @@ class SwUpdateStrategy(strategy.Strategy): return data +################################################################### +# +# The Software Patch Strategy +# +################################################################### class SwPatchStrategy(SwUpdateStrategy): """ Software Patch - Strategy @@ -1099,6 +1114,11 @@ class SwPatchStrategy(SwUpdateStrategy): return data +################################################################### +# +# The Software Upgrade Strategy +# +################################################################### class SwUpgradeStrategy(SwUpdateStrategy): """ Software Upgrade - Strategy @@ -1239,8 +1259,8 @@ class SwUpgradeStrategy(SwUpdateStrategy): for host in controllers: if HOST_PERSONALITY.WORKER in host.personality: - DLOG.warn("Cannot apply software upgrades to CPE configuration.") - reason = 'cannot apply software upgrades to CPE configuration' + DLOG.warn("Cannot apply software upgrades to AIO configuration.") + reason = 'cannot apply software upgrades to AIO configuration' return False, reason elif HOST_NAME.CONTROLLER_1 == host.name: controller_1_host = host @@ -1623,11 +1643,359 @@ class SwUpgradeStrategy(SwUpdateStrategy): return data +################################################################### +# +# The Firmware Update Strategy +# +################################################################### +class FwUpdateStrategy(SwUpdateStrategy): + """ + Firmware Update - Strategy - FPGA + """ + def __init__(self, uuid, controller_apply_type, storage_apply_type, + worker_apply_type, max_parallel_worker_hosts, + default_instance_action, + alarm_restrictions, ignore_alarms, + single_controller): + super(FwUpdateStrategy, self).__init__( + uuid, + STRATEGY_NAME.FW_UPDATE, + controller_apply_type, + storage_apply_type, + SW_UPDATE_APPLY_TYPE.IGNORE, + worker_apply_type, + max_parallel_worker_hosts, + default_instance_action, + alarm_restrictions, + ignore_alarms) + + # The following alarms will not prevent a firmware update operation + IGNORE_ALARMS = ['700.004', # VM stopped + '280.002', # Subcloud resource out-of-sync + '900.301', # Fw Update Auto Apply in progress + '200.001', # Locked Host + ] + + self._ignore_alarms += IGNORE_ALARMS + self._single_controller = single_controller + + self._fail_on_alarms = True + + # list of hostnames that need update + self._fw_update_hosts = list() + + @property + def fw_update_hosts(self): + """ + Returns a list of hostnames that require firmware update + """ + return self._fw_update_hosts + + @fw_update_hosts.setter + def fw_update_hosts(self, fw_update_hosts): + """ + Save a list of hostnames that require firmware update + """ + self._fw_update_hosts = fw_update_hosts + + def build(self): + """ + Build the strategy + """ + from nfv_vim import strategy + from nfv_vim import tables + + stage = strategy.StrategyStage( + strategy.STRATEGY_STAGE_NAME.FW_UPDATE_HOSTS_QUERY) + + # Firmware update is only supported for hosts that support + # the worker function. 
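The constructor extends the caller-supplied ignore_alarms with a fixed set of alarms that must not block a firmware update. A small sketch of the resulting effective ignore list; the caller-supplied ID is hypothetical, the four fixed IDs are the ones listed above::

    CALLER_IGNORE_ALARMS = ['100.101']   # hypothetical input from the caller
    FW_UPDATE_IGNORE_ALARMS = [
        '700.004',   # VM stopped
        '280.002',   # Subcloud resource out-of-sync
        '900.301',   # Fw Update Auto Apply in progress
        '200.001',   # Locked Host
    ]

    effective_ignore_alarms = list(CALLER_IGNORE_ALARMS) + FW_UPDATE_IGNORE_ALARMS
    print(effective_ignore_alarms)
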
+ if self._worker_apply_type == SW_UPDATE_APPLY_TYPE.IGNORE: + msg = "apply type is 'ignore' ; must be '%s' or '%s'" % \ + (SW_UPDATE_APPLY_TYPE.SERIAL, + SW_UPDATE_APPLY_TYPE.PARALLEL) + DLOG.warn("Worker %s" % msg) + self._state = strategy.STRATEGY_STATE.BUILD_FAILED + self.build_phase.result = strategy.STRATEGY_PHASE_RESULT.FAILED + self.build_phase.result_reason = "Worker " + msg + self.sw_update_obj.strategy_build_complete( + False, self.build_phase.result_reason) + self.save() + return + + stage.add_step(strategy.QueryAlarmsStep( + self._fail_on_alarms, + ignore_alarms=self._ignore_alarms)) + + # using existing vim host inventory add a step for each host + host_table = tables.tables_get_host_table() + for host in host_table.values(): + if HOST_PERSONALITY.WORKER in host.personality: + if host.is_unlocked() and host.is_enabled(): + stage.add_step(strategy.QueryHostDeviceListStep(host)) + + self.build_phase.add_stage(stage) + super(FwUpdateStrategy, self).build() + + def _add_worker_strategy_stages(self, worker_hosts, reboot): + """ + Add worker firmware update strategy stages + """ + from nfv_vim import strategy + from nfv_vim import tables + + hostnames = '' + for host in worker_hosts: + hostnames += host.name + ' ' + DLOG.info("Worker hosts that require firmware update: %s " % hostnames) + + # When using a single controller/worker host that is running + # OpenStack, only allow the stop/start instance action. + if self._single_controller: + for host in worker_hosts: + if host.openstack_compute and \ + HOST_PERSONALITY.CONTROLLER in host.personality and \ + SW_UPDATE_INSTANCE_ACTION.STOP_START != \ + self._default_instance_action: + DLOG.error("Cannot migrate instances in a single " + "controller configuration") + reason = 'cannot migrate instances in a single ' \ + 'controller configuration' + return False, reason + + # Returns a list of 'host update lists' based on serial vs parallel + # update specification and the overall host pool and various aspects + # of the hosts in that pool ; i.e. personality, instances, etc. + host_lists, reason = self._create_worker_host_lists(worker_hosts, reboot) + if host_lists is None: + DLOG.info("failed to create worker host lists") + return False, reason + + instance_table = tables.tables_get_instance_table() + + # Loop over the host aggregate lists creating back to back steps + # that will update all the worker hosts in the order dictated + # by the strategy. + for host_list in host_lists: + + # Start the Update Worker Hosts Stage ; the stage that includes all + # the steps to update all the worker hosts found to need firmware update. + stage = strategy.StrategyStage(strategy.STRATEGY_STAGE_NAME.FW_UPDATE_WORKER_HOSTS) + + # build a list of unlocked instances + instance_list = list() + for host in host_list: + for instance in instance_table.on_host(host.name): + # Do not take action (migrate or stop-start) on + # an instance if it is locked (i.e. stopped). + if not instance.is_locked(): + instance_list.append(instance) + + # Handle alarms that show up after create but before apply. + stage.add_step(strategy.QueryAlarmsStep( + self._fail_on_alarms, + ignore_alarms=self._ignore_alarms)) + + # Issue Firmware Update for hosts in host_list + stage.add_step(strategy.FwUpdateHostsStep(host_list)) + + # Handle reboot-required option with host lock/unlock. 
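build() produces a single query stage: one alarm query followed by one device-list query per unlocked, enabled worker host. An assumed rendering of that stage for a two-worker lab; the stage name string matches the Constant added later in this patch, the step name strings are assumed, and the host names are illustrative::

    query_stage = {
        'name': 'fw-update-hosts-query',
        'steps': [
            'query-alarms',                    # build fails on unignored alarms
            'query-host-devices (worker-0)',   # one step per unlocked-enabled worker
            'query-host-devices (worker-1)',
        ],
    }
    print('\n'.join(query_stage['steps']))
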
+ if reboot: + if 1 == len(host_list): + if HOST_PERSONALITY.CONTROLLER in host_list[0].personality: + if not self._single_controller: + # Handle upgrade of both controllers + # in AIO DX Swact controller before locking. + # If this is not the active controller then it has no effect + stage.add_step(strategy.SwactHostsStep(host_list)) + + # Handle instance migration + if len(instance_list): + # Migrate or stop instances as necessary + if SW_UPDATE_INSTANCE_ACTION.MIGRATE == \ + self._default_instance_action: + if SW_UPDATE_APPLY_TYPE.PARALLEL == \ + self._worker_apply_type: + # Disable host services before migrating to ensure + # instances do not migrate to worker hosts in the + # same set of hosts. + if host_list[0].host_service_configured( + HOST_SERVICES.COMPUTE): + stage.add_step(strategy.DisableHostServicesStep( + host_list, HOST_SERVICES.COMPUTE)) + # TODO(ksmith) + # When support is added for orchestration on + # non-OpenStack worker nodes, support for disabling + # kubernetes services will have to be added. + stage.add_step(strategy.MigrateInstancesStep( + instance_list)) + else: + stage.add_step(strategy.StopInstancesStep( + instance_list)) + + wait_until_disabled = True + if 1 == len(host_list): + if HOST_PERSONALITY.CONTROLLER in \ + host_list[0].personality: + if self._single_controller: + # Handle upgrade of AIO SX + # A single controller will not go disabled when + # it is locked. + wait_until_disabled = False + + # Lock hosts + stage.add_step(strategy.LockHostsStep(host_list, wait_until_disabled=wait_until_disabled)) + + # Wait for system to stabilize + stage.add_step(strategy.SystemStabilizeStep(timeout_in_secs=MTCE_DELAY)) + + # Unlock hosts + stage.add_step(strategy.UnlockHostsStep(host_list)) + + if 0 != len(instance_list): + # Start any instances that were stopped + if SW_UPDATE_INSTANCE_ACTION.MIGRATE != \ + self._default_instance_action: + stage.add_step(strategy.StartInstancesStep( + instance_list)) + + stage.add_step(strategy.SystemStabilizeStep()) + else: + # Less time required if host is not rebooting + stage.add_step(strategy.SystemStabilizeStep( + timeout_in_secs=NO_REBOOT_DELAY)) + + self.apply_phase.add_stage(stage) + + return True, '' + + def build_complete(self, result, result_reason): + """ + Strategy Build Complete + """ + from nfv_vim import strategy + from nfv_vim import tables + + result, result_reason = \ + super(FwUpdateStrategy, self).build_complete(result, result_reason) + + DLOG.verbose("Build Complete Callback, result=%s, reason=%s." % + (result, result_reason)) + + if result in [strategy.STRATEGY_RESULT.SUCCESS, + strategy.STRATEGY_RESULT.DEGRADED]: + + if self._nfvi_alarms: + # Fail create strategy if unignored alarms present + DLOG.warn("Active alarms found, can't update firmware.") + alarm_id_list = "" + for alarm_data in self._nfvi_alarms: + if alarm_id_list: + alarm_id_list += ', ' + alarm_id_list += alarm_data['alarm_id'] + DLOG.warn("... active alarms: %s" % alarm_id_list) + self._state = strategy.STRATEGY_STATE.BUILD_FAILED + self.build_phase.result = strategy.STRATEGY_PHASE_RESULT.FAILED + self.build_phase.result_reason = 'active alarms present ; ' + self.build_phase.result_reason += alarm_id_list + self.sw_update_obj.strategy_build_complete( + False, self.build_phase.result_reason) + self.save() + return + + # Fail if no hosts require firmware upgrade. 
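Putting the pieces of _add_worker_strategy_stages() together, one apply stage for a single standard worker host with the stop-start instance action and the reboot-required lock/unlock cycle would contain roughly the ordered steps below. This is an assumed rendering for illustration; the step name strings follow the Constants added later in this patch::

    fw_update_worker_stage = [
        'query-alarms',        # re-check alarms between create and apply
        'fw-update-hosts',     # start the device image update via sysinv
        'stop-instances',      # only when unlocked instances are present
        'lock-hosts',
        'system-stabilize',    # MTCE_DELAY
        'unlock-hosts',
        'start-instances',
        'system-stabilize',
    ]
    print('\n'.join(fw_update_worker_stage))
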
+ if len(self._fw_update_hosts) == 0: + self.build_phase.result_reason = "no firmware update required" + DLOG.warn(self.build_phase.result_reason) + self._state = strategy.STRATEGY_STATE.BUILD_FAILED + self.build_phase.result = strategy.STRATEGY_PHASE_RESULT.FAILED + self.sw_update_obj.strategy_build_complete( + False, self.build_phase.result_reason) + self.save() + return + + worker_hosts = list() + host_table = tables.tables_get_host_table() + for host in host_table.values(): + if host.name in self._fw_update_hosts: + worker_hosts.append(host) + + STRATEGY_CREATION_COMMANDS = [ + (self._add_worker_strategy_stages, + worker_hosts, True)] + + for add_strategy_stages_function, host_list, reboot in \ + STRATEGY_CREATION_COMMANDS: + if host_list: + success, reason = add_strategy_stages_function( + host_list, reboot) + if not success: + self._state = strategy.STRATEGY_STATE.BUILD_FAILED + self.build_phase.result = \ + strategy.STRATEGY_PHASE_RESULT.FAILED + self.build_phase.result_reason = reason + self.sw_update_obj.strategy_build_complete( + False, self.build_phase.result_reason) + self.save() + return + else: + self.sw_update_obj.strategy_build_complete( + False, self.build_phase.result_reason) + + self.sw_update_obj.strategy_build_complete(True, '') + self.save() + + def from_dict(self, + data, + build_phase=None, + apply_phase=None, + abort_phase=None): + """ + Load firmware update strategy object from dict data. + """ + from nfv_vim import nfvi + + super(FwUpdateStrategy, self).from_dict( + data, build_phase, apply_phase, abort_phase) + + self._single_controller = data['single_controller'] + + # Load nfvi alarm data + nfvi_alarms = list() + nfvi_alarms_data = data.get('nfvi_alarms_data') + if nfvi_alarms_data: + for alarm_data in data['nfvi_alarms_data']: + alarm = nfvi.objects.v1.Alarm( + alarm_data['alarm_uuid'], alarm_data['alarm_id'], + alarm_data['entity_instance_id'], alarm_data['severity'], + alarm_data['reason_text'], alarm_data['timestamp'], + alarm_data['mgmt_affecting']) + nfvi_alarms.append(alarm) + self._nfvi_alarms = nfvi_alarms + return self + + def as_dict(self): + """ + Return firmware update strategy nfvi data object as dictionary. + """ + data = super(FwUpdateStrategy, self).as_dict() + + data['single_controller'] = self._single_controller + + # Save nfvi alarm info to data + if self._nfvi_alarms: + nfvi_alarms_data = list() + for alarm in self._nfvi_alarms: + nfvi_alarms_data.append(alarm.as_dict()) + data['nfvi_alarms_data'] = nfvi_alarms_data + return data + + def strategy_rebuild_from_dict(data): """ Returns the strategy object initialized using the given dictionary """ - from nfv_vim.strategy._strategy_phases import strategy_phase_rebuild_from_dict + from nfv_vim.strategy._strategy_phases import strategy_phase_rebuild_from_dict # noqa: F401 if not data: return None @@ -1640,6 +2008,8 @@ def strategy_rebuild_from_dict(data): strategy_obj = object.__new__(SwPatchStrategy) elif STRATEGY_NAME.SW_UPGRADE == data['name']: strategy_obj = object.__new__(SwUpgradeStrategy) + elif STRATEGY_NAME.FW_UPDATE == data['name']: + strategy_obj = object.__new__(FwUpdateStrategy) else: strategy_obj = object.__new__(strategy.StrategyStage) diff --git a/nfv/nfv-vim/nfv_vim/strategy/_strategy_defs.py b/nfv/nfv-vim/nfv_vim/strategy/_strategy_defs.py index 0d08e642..87f4f906 100755 --- a/nfv/nfv-vim/nfv_vim/strategy/_strategy_defs.py +++ b/nfv/nfv-vim/nfv_vim/strategy/_strategy_defs.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. 
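as_dict() persists the extra 'single_controller' flag and, when present, the captured alarm data, while strategy_rebuild_from_dict() dispatches on the strategy name. A hedged sketch of the persisted shape; the values are illustrative only and the alarm field names follow from_dict() above::

    persisted = {
        'name': 'fw-update',            # used by strategy_rebuild_from_dict()
        'single_controller': False,     # added by FwUpdateStrategy.as_dict()
        'nfvi_alarms_data': [           # only present when alarms were captured
            {
                'alarm_uuid': 'uuid-1',
                'alarm_id': '900.301',
                'entity_instance_id': 'orchestration=fw-update',
                'severity': 'major',
                'reason_text': 'Firmware update auto-apply inprogress',
                'timestamp': '2020-04-22 12:00:00',
                'mgmt_affecting': False,
            },
        ],
    }
    print(sorted(persisted.keys()))
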
+# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -18,6 +18,8 @@ class EventNames(object): HOST_UNLOCK_FAILED = Constant('host-unlock-failed') HOST_REBOOT_FAILED = Constant('host-reboot-failed') HOST_UPGRADE_FAILED = Constant('host-upgrade-failed') + HOST_FW_UPDATE_FAILED = Constant('host-fw-update-failed') + HOST_FW_UPDATE_ABORT_FAILED = Constant('host-fw-update-abort-failed') HOST_SWACT_FAILED = Constant('host-swact-failed') HOST_STATE_CHANGED = Constant('host-state-changed') HOST_AUDIT = Constant('host-audit') @@ -30,3 +32,20 @@ class EventNames(object): # Constants STRATEGY_EVENT = EventNames() + + +@six.add_metaclass(Singleton) +class FirmwareUpdateLabels(object): + """ + Firmware Update Labels + """ + # Host image update pending key label : True / False + DEVICE_IMAGE_NEEDS_FIRMWARE_UPDATE = Constant('needs_firmware_update') + + # Device Image Status + DEVICE_IMAGE_UPDATE_PENDING = Constant('pending') + DEVICE_IMAGE_UPDATE_IN_PROGRESS = Constant('in-progress') + DEVICE_IMAGE_UPDATE_COMPLETED = Constant('completed') + DEVICE_IMAGE_UPDATE_FAILED = Constant('failed') + +FW_UPDATE_LABEL = FirmwareUpdateLabels() diff --git a/nfv/nfv-vim/nfv_vim/strategy/_strategy_stages.py b/nfv/nfv-vim/nfv_vim/strategy/_strategy_stages.py index 9f157035..50227ad5 100755 --- a/nfv/nfv-vim/nfv_vim/strategy/_strategy_stages.py +++ b/nfv/nfv-vim/nfv_vim/strategy/_strategy_stages.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -30,6 +30,10 @@ class StrategyStageNames(Constants): SW_UPGRADE_STORAGE_HOSTS = Constant('sw-upgrade-storage-hosts') SW_UPGRADE_WORKER_HOSTS = Constant('sw-upgrade-worker-hosts') SW_UPGRADE_COMPLETE = Constant('sw-upgrade-complete') + FW_UPDATE_QUERY = Constant('fw-update-query') + FW_UPDATE_HOSTS_QUERY = Constant('fw-update-hosts-query') + FW_UPDATE_HOST_QUERY = Constant('fw-update-host-query') + FW_UPDATE_WORKER_HOSTS = Constant('fw-update-worker-hosts') # Constant Instantiation diff --git a/nfv/nfv-vim/nfv_vim/strategy/_strategy_steps.py b/nfv/nfv-vim/nfv_vim/strategy/_strategy_steps.py index 19e76919..28be5496 100755 --- a/nfv/nfv-vim/nfv_vim/strategy/_strategy_steps.py +++ b/nfv/nfv-vim/nfv_vim/strategy/_strategy_steps.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2016 Wind River Systems, Inc. +# Copyright (c) 2015-2020 Wind River Systems, Inc. 
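The FirmwareUpdateLabels constants name the per-device key and status values the strategy steps read from the sysinv device list. A stand-alone sketch interpreting one device record with those names; the record itself is made up::

    NEEDS_UPDATE_KEY = 'needs_firmware_update'
    STATUS_IN_PROGRESS = 'in-progress'
    STATUS_COMPLETED = 'completed'
    STATUS_FAILED = 'failed'

    pci_device = {'name': 'dev-a', 'needs_firmware_update': True,
                  'status': 'in-progress'}

    if pci_device.get(NEEDS_UPDATE_KEY) is False:
        print("device is up-to-date")
    elif pci_device.get('status') == STATUS_IN_PROGRESS:
        print("firmware update in-progress")
    elif pci_device.get('status') == STATUS_COMPLETED:
        print("firmware update complete")
    elif pci_device.get('status') == STATUS_FAILED:
        print("firmware update failed")
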
# # SPDX-License-Identifier: Apache-2.0 # @@ -13,6 +13,7 @@ from nfv_common.helpers import Singleton from nfv_common import strategy from nfv_common import timers from nfv_vim import objects +from nfv_vim.strategy._strategy_defs import FW_UPDATE_LABEL from nfv_vim.strategy._strategy_defs import STRATEGY_EVENT from nfv_vim import tables @@ -24,6 +25,7 @@ class StrategyStepNames(Constants): """ Strategy Step Names """ + QUERY_HOSTS = Constant('query-hosts') SYSTEM_STABILIZE = Constant('system-stabilize') LOCK_HOSTS = Constant('lock-hosts') UNLOCK_HOSTS = Constant('unlock-hosts') @@ -34,6 +36,8 @@ class StrategyStepNames(Constants): COMPLETE_UPGRADE = Constant('complete-upgrade') SWACT_HOSTS = Constant('swact-hosts') SW_PATCH_HOSTS = Constant('sw-patch-hosts') + FW_UPDATE_HOSTS = Constant('fw-update-hosts') + FW_UPDATE_ABORT_HOSTS = Constant('fw-update-abort-hosts') MIGRATE_INSTANCES = Constant('migrate-instances') STOP_INSTANCES = Constant('stop-instances') START_INSTANCES = Constant('start-instances') @@ -41,6 +45,8 @@ class StrategyStepNames(Constants): WAIT_DATA_SYNC = Constant('wait-data-sync') QUERY_SW_PATCHES = Constant('query-sw-patches') QUERY_SW_PATCH_HOSTS = Constant('query-sw-patch-hosts') + QUERY_HOST_DEVICES = Constant('query-host-devices') + QUERY_HOST_DEVICE = Constant('query-host-device') QUERY_UPGRADE = Constant('query-upgrade') DISABLE_HOST_SERVICES = Constant('disable-host-services') ENABLE_HOST_SERVICES = Constant('enable-host-services') @@ -1386,6 +1392,7 @@ class StopInstancesStep(strategy.StrategyStep): if instance is not None: self._instances.append(instance) self._instance_names.append(instance.name) + # Retrieve the host this instance was on when the step was # created. self._instance_host_names[instance.uuid] = \ @@ -1476,6 +1483,7 @@ class QueryAlarmsStep(strategy.StrategyStep): Query Alarms Callback """ response = (yield) + DLOG.debug("Query-Alarms callback response=%s." % response) if response['completed']: @@ -1734,7 +1742,7 @@ class QuerySwPatchHostsStep(strategy.StrategyStep): def as_dict(self): """ - Represent the query software update hosts step as a dictionary + Represent the query software patches hosts step as a dictionary """ data = super(QuerySwPatchHostsStep, self).as_dict() data['entity_type'] = '' @@ -1743,6 +1751,449 @@ class QuerySwPatchHostsStep(strategy.StrategyStep): return data +class QueryHostDeviceListStep(strategy.StrategyStep): + """ + Query Host Device List + """ + + # This step queries system inventory for each host's device list. + # Any hosts whose devices have a firmware update pending have its name + # added to _fw_update_hosts to build a list of host names that require + # firmware update. + + def __init__(self, host): + super(QueryHostDeviceListStep, self).__init__( + STRATEGY_STEP_NAME.QUERY_HOST_DEVICES, timeout_in_secs=60) + + self._host_names = list() + self._host_uuids = list() + self._host_names.append(host.name) + self._host_uuids.append(host.uuid) + + @coroutine + def _get_host_devices_callback(self): + """ + Query Host Device List callback + """ + from nfv_vim import tables + + response = (yield) + + DLOG.verbose("get-host-devices %s callback response=%s." 
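A stand-alone sketch of the selection rule QueryHostDeviceListStep implements: a host is added to fw_update_hosts as soon as any of its devices still needs a firmware update. The response layout mirrors the 'result-data' handled in the callback; the names and uuids are made up::

    result_data = {
        'pci_devices': [
            {'name': 'dev-a', 'host_uuid': 'uuid-w0', 'needs_firmware_update': False},
            {'name': 'dev-b', 'host_uuid': 'uuid-w0', 'needs_firmware_update': True},
        ],
    }
    hosts_by_uuid = {'uuid-w0': 'worker-0'}

    fw_update_hosts = []
    for dev in result_data['pci_devices']:
        if dev.get('needs_firmware_update') is False:
            continue                       # device already up-to-date
        name = hosts_by_uuid.get(dev['host_uuid'])
        if name and name not in fw_update_hosts:
            fw_update_hosts.append(name)   # host needs at least one update

    print(fw_update_hosts)                 # ['worker-0']
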
% + (self._host_names[0], response)) + + if response['completed']: + if self.strategy is not None: + pci_devices = response['result-data'].get('pci_devices') + if pci_devices: + host_added_to_fw_update_hosts_list = False + for pci_device in pci_devices: + if pci_device.get(FW_UPDATE_LABEL.DEVICE_IMAGE_NEEDS_FIRMWARE_UPDATE) is False: + DLOG.verbose("%s:%s device is up-to-date" % + (self._host_names[0], + pci_device.get('name'))) + continue + + # using existing vim host inventory for host info + host_table = tables.tables_get_host_table() + for host in host_table.values(): + if host.uuid == pci_device['host_uuid']: + DLOG.info("%s:%s device requires update" % + (host.name, pci_device.get('name'))) + if host_added_to_fw_update_hosts_list is False: + self.strategy.fw_update_hosts.append(host.name) + host_added_to_fw_update_hosts_list = True + + result = strategy.STRATEGY_STEP_RESULT.SUCCESS + self.stage.step_complete(result, "") + else: + result = strategy.STRATEGY_STEP_RESULT.FAILED + self.stage.step_complete(result, "firmware update query failed") + + def apply(self): + """ + Query Host Device List Apply + """ + from nfv_vim import nfvi + + DLOG.info("%s %s step apply" % (self._host_names[0], self._name)) + + # This step is only ever called with one host name. + nfvi.nfvi_get_host_devices(self._host_uuids[0], + self._host_names[0], + self._get_host_devices_callback()) + return strategy.STRATEGY_STEP_RESULT.WAIT, "" + + def from_dict(self, data): + """ + Load the firmware update host device list step + """ + super(QueryHostDeviceListStep, self).from_dict(data) + self._host_names = data['entity_names'] + self._host_uuids = data['entity_uuids'] + return self + + def as_dict(self): + """ + Represent the object as a dictionary for the strategy + """ + data = super(QueryHostDeviceListStep, self).as_dict() + data['entity_type'] = '' + data['entity_names'] = self._host_names + data['entity_uuids'] = self._host_uuids + return data + + +class FwUpdateHostsStep(strategy.StrategyStep): + """ + Firmware Update Hosts - Strategy Step + """ + # This step starts the firmware update process for the passed in hosts + def __init__(self, hosts): + super(FwUpdateHostsStep, self).__init__( + STRATEGY_STEP_NAME.FW_UPDATE_HOSTS, timeout_in_secs=3600) + + # Constants + self.MONITOR_THRESHOLD = 0 + + self._hosts = hosts + self._host_names = list() + self._host_uuids = list() + self._monitoring_fw_update = False + self._wait_time = 0 + self._host_failed_device_update = dict() + self._host_monitor_counter = dict() + self._host_completed = dict() + for host in hosts: + self._host_names.append(host.name) + self._host_uuids.append(host.uuid) + self._host_completed[host.name] = (False, False, '') + self._host_monitor_counter[host.uuid] = 0 + self._host_failed_device_update[host.name] = list() + + @coroutine + def _host_devices_list_callback(self): + """ + Query Host Device List callback used for monitoring update process + """ + response = (yield) + DLOG.debug("Host-Device-List callback response=%s." 
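FwUpdateHostsStep tracks progress per host: _host_completed maps each host name to a (done, success, reason) tuple, and the step only finishes once every host is marked done. A stand-alone sketch of that bookkeeping and of the completion scan performed by _check_step_complete(); the host names and reason text are made up::

    host_completed = {
        'worker-0': (True, True, ''),
        'worker-1': (True, False, "firmware update failed for devices: ['dev-b']"),
    }

    done = all(entry[0] for entry in host_completed.values())
    failed = [name for name, entry in host_completed.items()
              if entry[0] and not entry[1]]

    if done:
        if failed:
            print("step failed for: %s" % ', '.join(failed))
        else:
            print("firmware update step complete")
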
% response) + try: + if response['completed']: + if self.strategy is not None: + # find the host for this callback response + host_uuid = response['result-data']['pci_devices'][0].get('host_uuid') + if host_uuid: + if len(self._hosts): + for host in self._hosts: + if host.uuid == host_uuid: + # found it + self._host_monitor_counter[host.uuid] += 1 + pci_devices = response['result-data'].get('pci_devices') + if len(pci_devices): + self._check_status(host.name, + host.uuid, + pci_devices) + return + else: + DLOG.info("failed to find any pci devices") + else: + DLOG.error("failed to find %s in hosts list" % host_uuid) + else: + DLOG.error("failed to get hostname in host device list response") + else: + DLOG.error("failed to get host-device-list ; no strategy") + else: + DLOG.error("get host device list request did not complete") + except Exception as e: + DLOG.exception("Caught exception interpreting host device list") + DLOG.error("Response: %s" % response) + + result = strategy.STRATEGY_STEP_RESULT.FAILED + fail_msg = "failed to get or parse fw update info" + self.stage.step_complete(result, fail_msg) + + def apply(self): + """ + Firmware Update Hosts Apply + """ + from nfv_vim import directors + + DLOG.info("Step (%s) apply for hosts %s." % (self._name, + self._host_names)) + + if len(self._host_names): + host_director = directors.get_host_director() + operation = host_director.fw_update_hosts(self._host_names) + if operation.is_inprogress(): + return strategy.STRATEGY_STEP_RESULT.WAIT, "" + elif operation.is_failed(): + return strategy.STRATEGY_STEP_RESULT.FAILED, operation.reason + + return strategy.STRATEGY_STEP_RESULT.SUCCESS, "" + else: + reason = "no hosts found in firmware update step" + result = strategy.STRATEGY_STEP_RESULT.FAILED + self.stage.step_complete(result, reason) + return strategy.STRATEGY_STEP_RESULT.FAILED, reason + + def handle_event(self, event, event_data=None): + """ + Handle Firmware Image Update events + """ + from nfv_vim import nfvi + + DLOG.debug("Step (%s) handle event (%s)." 
%
+                   (self._name, event))
+
+        if event == STRATEGY_EVENT.HOST_FW_UPDATE_FAILED:
+            host = event_data
+            if host is not None and host.name in self._host_names:
+                result = strategy.STRATEGY_STEP_RESULT.FAILED
+                self.stage.step_complete(result, "fw image update failed")
+                return True
+
+        elif event == STRATEGY_EVENT.HOST_AUDIT:
+            if not self._monitoring_fw_update:
+                self._monitoring_fw_update = True
+                DLOG.info("Start monitoring firmware update progress for %s" %
+                          self._host_names)
+
+            if 0 == self._wait_time:
+                self._wait_time = timers.get_monotonic_timestamp_in_ms()
+            now_ms = timers.get_monotonic_timestamp_in_ms()
+            secs_expired = (now_ms - self._wait_time) / 1000
+            if 60 <= secs_expired:
+                # force timer reload on next audit
+                self._wait_time = 0
+
+                for host in self._hosts:
+                    if self._host_completed[host.name][0] is True:
+                        DLOG.info("%s update already done" % host.name)
+                        continue
+
+                    DLOG.info("%s firmware update monitor request %d" %
+                              (host.name, self._host_monitor_counter[host.uuid] + 1))
+                    nfvi.nfvi_get_host_devices(host.uuid,
+                                               host.name,
+                                               self._host_devices_list_callback())
+            return True
+        else:
+            DLOG.warn("Unexpected event (%s)" % event)
+
+        return False
+
+    def _check_status(self, host_name, host_uuid, pci_devices):
+        """Check firmware update status for specified host"""
+
+        done = True
+        for pci_device in pci_devices:
+            if pci_device.get(FW_UPDATE_LABEL.DEVICE_IMAGE_NEEDS_FIRMWARE_UPDATE) is False:
+                continue
+
+            status = pci_device.get('status')
+            pci_device_name = pci_device.get('name')
+
+            # Handle simulated testing ; Remove after integration testing
+            if self.MONITOR_THRESHOLD > 0 or status is None:
+                if self._host_monitor_counter[host_uuid] >= self.MONITOR_THRESHOLD:
+                    status = FW_UPDATE_LABEL.DEVICE_IMAGE_UPDATE_COMPLETED
+                else:
+                    status = FW_UPDATE_LABEL.DEVICE_IMAGE_UPDATE_IN_PROGRESS
+
+            # stop monitoring failed devices
+            if pci_device_name in self._host_failed_device_update[host_name]:
+                continue
+
+            elif status == FW_UPDATE_LABEL.DEVICE_IMAGE_UPDATE_IN_PROGRESS:
+                done = False
+                DLOG.verbose("%s pci device %s firmware update in-progress" %
+                             (host_name, pci_device_name))
+
+            elif status == FW_UPDATE_LABEL.DEVICE_IMAGE_UPDATE_COMPLETED:
+                DLOG.verbose("%s %s firmware update complete" %
+                             (host_name, pci_device_name))
+
+            elif status == FW_UPDATE_LABEL.DEVICE_IMAGE_UPDATE_FAILED:
+                if pci_device_name not in self._host_failed_device_update[host_name]:
+                    DLOG.info("%s %s firmware update failed" %
+                              (host_name, pci_device_name))
+                    self._host_failed_device_update[host_name].append(pci_device_name)
+
+            else:
+                if pci_device_name not in self._host_failed_device_update[host_name]:
+                    self._host_failed_device_update[host_name].append(pci_device_name)
+                    DLOG.info('unexpected device image status (%s)' % status)
+
+        if done:
+            if len(self._host_failed_device_update[host_name]):
+                failed_msg = "firmware update failed for devices: "
+                failed_msg += str(self._host_failed_device_update[host_name])
+                self._host_completed[host_name] = (True, False, failed_msg)
+            else:
+                self._host_completed[host_name] = (True, True, '')
+
+            # Check for firmware update step complete
+            self._check_step_complete()
+
+    def _check_step_complete(self):
+        """
+        Check for firmware update step complete
+        """
+
+        failed_hosts = ""
+        done = True
+        for hostname in self._host_names:
+            if self._host_completed[hostname][0] is False:
+                done = False
+            elif self._host_completed[hostname][1] is False:
+                failed_hosts += hostname + ' '
+            else:
+                DLOG.info("%s firmware update is complete" % hostname)
+
+        if done:
+            if len(failed_hosts)
== 0: + result = strategy.STRATEGY_STEP_RESULT.SUCCESS + self.stage.step_complete(result, '') + else: + result = strategy.STRATEGY_STEP_RESULT.FAILED + failed_msg = 'Firmware update failed ; %s' % failed_hosts + self.stage.step_complete(result, failed_msg) + + def abort(self): + """ + Returns the abort step related to this step + """ + return [FwUpdateAbortHostsStep(self._hosts)] + + def from_dict(self, data): + """ + Returns the firmware update hosts step object + initialized using the given dictionary + """ + super(FwUpdateHostsStep, self).from_dict(data) + self._hosts = list() + self._host_uuids = list() + self._host_completed = dict() + + self._monitoring_fw_update = False + + self._host_names = data['entity_names'] + host_table = tables.tables_get_host_table() + for host_name in self._host_names: + host = host_table.get(host_name, None) + if host is not None: + self._hosts.append(host) + self._host_uuids.append(host.uuid) + self._host_completed[host_name] = \ + data['hosts_completed'][host_name] + return self + + def as_dict(self): + """ + Represent the firmware update hosts step as a dictionary + """ + data = super(FwUpdateHostsStep, self).as_dict() + data['entity_type'] = 'hosts' + data['entity_names'] = self._host_names + data['entity_uuids'] = self._host_uuids + data['hosts_completed'] = self._host_completed + return data + + +class FwUpdateAbortHostsStep(strategy.StrategyStep): + """ + Firmware Update Abort Hosts Step + """ + def __init__(self, hosts): + super(FwUpdateAbortHostsStep, self).__init__( + STRATEGY_STEP_NAME.FW_UPDATE_ABORT_HOSTS, timeout_in_secs=3600) + + self._hosts = hosts + self._host_names = list() + self._host_uuids = list() + + self._wait_time = 0 + + self._host_completed = dict() + for host in hosts: + self._host_names.append(host.name) + self._host_uuids.append(host.uuid) + self._host_completed[host.name] = (False, False, '') + + def apply(self): + """ + Monitor Firmware Update Abort Hosts Apply + """ + from nfv_vim import directors + + DLOG.info("Step (%s) apply for hosts %s." % (self._name, + self._host_names)) + + host_director = directors.get_host_director() + operation = host_director.fw_update_abort_hosts(self._host_names) + if operation.is_inprogress(): + return strategy.STRATEGY_STEP_RESULT.WAIT, "" + elif operation.is_failed(): + return strategy.STRATEGY_STEP_RESULT.FAILED, operation.reason + + return strategy.STRATEGY_STEP_RESULT.SUCCESS, "" + + def handle_event(self, event, event_data=None): + """ + Handle Firmware Image Update Abort events + """ + # from nfv_vim import nfvi + + DLOG.debug("Step (%s) handle event (%s)." 
% (self._name, event)) + + if event == STRATEGY_EVENT.HOST_FW_UPDATE_ABORT_FAILED: + host = event_data + if host is not None and host.name in self._host_names: + failed_msg = "device image update abort failed" + DLOG.info("%s %s" % (host.name, failed_msg)) + result = strategy.STRATEGY_STEP_RESULT.FAILED + self.stage.step_complete(result, failed_msg) + return True + + elif event == STRATEGY_EVENT.HOST_AUDIT: + result = strategy.STRATEGY_STEP_RESULT.SUCCESS + self.stage.step_complete(result, '') + return True + + return False + + def from_dict(self, data): + """ + Load the firmware update abort hosts step object + """ + super(FwUpdateAbortHostsStep, self).from_dict(data) + self._hosts = list() + self._host_uuids = list() + self._host_completed = dict() + + self._host_names = data['entity_names'] + host_table = tables.tables_get_host_table() + for host_name in self._host_names: + host = host_table.get(host_name, None) + if host is not None: + self._hosts.append(host) + self._host_uuids.append(host.uuid) + self._host_completed[host_name] = \ + data['hosts_completed'][host_name] + return self + + def as_dict(self): + """ + Save the firmware update abort hosts step as a dictionary + """ + data = super(FwUpdateAbortHostsStep, self).as_dict() + data['entity_type'] = 'hosts' + data['entity_names'] = self._host_names + data['entity_uuids'] = self._host_uuids + data['hosts_completed'] = self._host_completed + return data + + class QueryUpgradeStep(strategy.StrategyStep): """ Query Upgrade - Strategy Step @@ -1872,7 +2323,8 @@ class DisableHostServicesStep(strategy.StrategyStep): host = event_data if host is not None and host.name in self._host_names: result = strategy.STRATEGY_STEP_RESULT.FAILED - self.stage.step_complete(result, "disable host services failed") + self.stage.step_complete(result, + "disable host services failed") return True return False @@ -2080,6 +2532,15 @@ def strategy_step_rebuild_from_dict(data): elif STRATEGY_STEP_NAME.ENABLE_HOST_SERVICES == data['name']: step_obj = object.__new__(EnableHostServicesStep) + elif STRATEGY_STEP_NAME.FW_UPDATE_HOSTS == data['name']: + step_obj = object.__new__(FwUpdateHostsStep) + + elif STRATEGY_STEP_NAME.FW_UPDATE_ABORT_HOSTS == data['name']: + step_obj = object.__new__(FwUpdateAbortHostsStep) + + elif STRATEGY_STEP_NAME.QUERY_HOST_DEVICES == data['name']: + step_obj = object.__new__(QueryHostDeviceListStep) + else: step_obj = object.__new__(strategy.StrategyStep)
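To make the monitoring logic in FwUpdateHostsStep easier to follow, here is a
minimal, standalone sketch of its per-host bookkeeping: each audit-driven
device query is folded into a (done, success, reason) tuple per host, and the
step only reports a result once every host is accounted for. The DevicePlan
class, the status strings and the 'needs_firmware_update' key below are
illustrative stand-ins, not part of the VIM code, which reads the sysinv
device list through FW_UPDATE_LABEL constants, drives queries via
nfvi_get_host_devices() callbacks and reports completion through
stage.step_complete()::

    # Simplified stand-in for the bookkeeping done by FwUpdateHostsStep.
    # Field names and status strings here are hypothetical; the real step
    # takes them from the sysinv device list via FW_UPDATE_LABEL constants.

    IN_PROGRESS = 'in-progress'
    COMPLETED = 'completed'
    FAILED = 'failed'


    class DevicePlan(object):
        """Tracks firmware update progress for a set of hosts."""

        def __init__(self, host_names):
            # host name -> (done, success, reason), mirroring _host_completed
            self._host_completed = dict(
                (name, (False, False, '')) for name in host_names)
            self._failed_devices = dict((name, []) for name in host_names)

        def check_status(self, host_name, pci_devices):
            """Interpret one device-list query result for a host."""
            done = True
            for device in pci_devices:
                if not device.get('needs_firmware_update'):
                    # device image is already up to date; nothing to monitor
                    continue
                status = device.get('status')
                if status == IN_PROGRESS:
                    # keep monitoring this host on the next audit
                    done = False
                elif status != COMPLETED:
                    # failed or unrecognized status; record and stop waiting
                    self._failed_devices[host_name].append(device['name'])
            if done:
                failed = self._failed_devices[host_name]
                if failed:
                    self._host_completed[host_name] = (
                        True, False, 'failed devices: %s' % failed)
                else:
                    self._host_completed[host_name] = (True, True, '')

        def step_result(self):
            """None while any host is still updating, else (success, reason)."""
            if not all(done for done, _, _ in self._host_completed.values()):
                return None
            failed_hosts = [name for name, (_, ok, _) in
                            self._host_completed.items() if not ok]
            if failed_hosts:
                return False, 'Firmware update failed ; %s' % ' '.join(failed_hosts)
            return True, ''


    if __name__ == '__main__':
        plan = DevicePlan(['compute-0', 'compute-1'])
        plan.check_status('compute-0', [{'name': 'pci_0000_b3_00_0',
                                         'needs_firmware_update': True,
                                         'status': COMPLETED}])
        plan.check_status('compute-1', [{'name': 'pci_0000_b4_00_0',
                                         'needs_firmware_update': True,
                                         'status': FAILED}])
        # prints (False, 'Firmware update failed ; compute-1')
        print(plan.step_result())

Keeping the completion state in plain name-keyed dictionaries is also what
lets the real step be saved with as_dict() and rebuilt by
strategy_step_rebuild_from_dict() above after a process restart.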