Clean up legacy load and upgrade functionality

This commit removes legacy load and upgrade functionality that is being
replaced by USM. It also modifies the prestage system controller
upgrade check to block prestaging when a software deployment is in
progress.

This commit also removes the check that prevents prestaging if any
uploaded release is not 'deployed'. The prestaging playbook is already
responsible for this logic.
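
For reference, a minimal illustration of the new pre-check (hedged: the
helper name _check_no_deploy_in_progress is hypothetical; the actual
change adds this guard to global_prestage_validate() in
dcmanager/common/prestage.py, shown in the diff below):

from dccommon import consts as dccommon_consts
from dcmanager.common import exceptions
from dcmanager.common import utils


def _check_no_deploy_in_progress():
    # Block prestage whenever USM reports an in-progress software
    # deployment on the system controller, regardless of the release.
    if utils.is_system_controller_deploying():
        raise exceptions.PrestagePreCheckFailedException(
            subcloud=dccommon_consts.SYSTEM_CONTROLLER_NAME,
            details=(
                "Prestage operations are not allowed while system "
                "controller has a software deployment in progress."
            ),
        )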

Test Plan:
1. PASS - Run subcloud prestaging and verify that it completes
successfully;
2. PASS - Start the deployment of a software release, attempt to
prestage a subcloud, and verify that the prestage fails due to the
ongoing deployment;
3. PASS - On a 24.09 system controller, upload a 24.09.01 patch
and attempt to prestage a subcloud for software deploy using
the parameter --release 24.09 and verify that the prestage
operation is allowed even if the 24.09.01 release is in the
'available' state;
4. PASS - Upload a device image using the "system --os-region-name
SystemController device-image-upload" command and verify that the
image is uploaded to the "/opt/dc-vault/device_images" directory.
5. PASS - Remove the device image and verify that it gets removed from
the "/opt/dc-vault/device_images" directory.

Story: 2010676
Task: 50921

Change-Id: I5155b12995d60969cfa2b8d4624c4d8a82b7dfd2
Signed-off-by: Gustavo Herzmann <gustavo.herzmann@windriver.com>
Gustavo Herzmann 2024-08-26 09:23:14 -03:00
parent 40abe3bdb5
commit 7f7caff49d
17 changed files with 95 additions and 492 deletions

View File

@ -31,7 +31,7 @@
"endpoint_type": "dc-cert"
},
{
"sync_status": "in-sync",
"sync_status": "not-available",
"endpoint_type": "load"
},
{
@ -55,8 +55,12 @@
"endpoint_type": "identity"
},
{
"sync_status": "out-of-sync",
"sync_status": "not-available",
"endpoint_type": "patching"
},
{
"sync_status": "out-of-sync",
"endpoint_type": "usm"
}
],
}

View File

@ -28,7 +28,7 @@
"endpoint_type": "dc-cert"
},
{
"sync_status": "in-sync",
"sync_status": "not-available",
"endpoint_type": "load"
},
{
@ -52,8 +52,12 @@
"endpoint_type": "identity"
},
{
"sync_status": "out-of-sync",
"sync_status": "not-available",
"endpoint_type": "patching"
},
{
"sync_status": "out-of-sync",
"endpoint_type": "usm"
}
],
}

View File

@ -33,7 +33,7 @@
"endpoint_type": "dc-cert"
},
{
"sync_status": "in-sync",
"sync_status": "not-available",
"endpoint_type": "load"
},
{
@ -57,8 +57,12 @@
"endpoint_type": "identity"
},
{
"sync_status": "out-of-sync",
"sync_status": "not-available",
"endpoint_type": "patching"
},
{
"sync_status": "out-of-sync",
"endpoint_type": "usm"
}
],
}

View File

@ -27,6 +27,8 @@ REST_SHOW_TIMEOUT = 150
REST_DELETE_TIMEOUT = 300
# TODO(gherzman): Use the software_client instead of using the requests module
# https://opendev.org/starlingx/update/src/branch/master/software-client
class SoftwareClient(base.DriverBase):
"""Software V1 driver."""
@ -85,6 +87,12 @@ class SoftwareClient(base.DriverBase):
response = requests.delete(url, headers=self.headers, timeout=timeout)
return self._handle_response(response, operation="Deploy delete")
def show_deploy(self, timeout=REST_DEFAULT_TIMEOUT):
"""Show deploy"""
url = self.endpoint + "/deploy"
response = requests.get(url, headers=self.headers, timeout=timeout)
return self._handle_response(response, operation="Show deploy")
def commit_patch(self, releases, timeout=REST_DEFAULT_TIMEOUT):
"""Commit patch"""
release_str = "/".join(releases)
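
For reference (example only, not part of the diff): a hedged usage
sketch of the new show_deploy call. The import paths and keystone
wiring below are assumptions modeled on the call sites added to
dcmanager/common/utils.py further down.

from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack import software_v1

# Reuse the cached keystone client for the system controller region.
ks_client = OpenStackDriver(
    region_name=dccommon_consts.SYSTEM_CONTROLLER_NAME, region_clients=None
).keystone_client

software_client = software_v1.SoftwareClient(
    ks_client.session,
    dccommon_consts.SYSTEM_CONTROLLER_NAME,
    endpoint=ks_client.endpoint_cache.get_endpoint(
        dccommon_consts.ENDPOINT_NAME_USM
    ),
)

# GET <usm-endpoint>/deploy: an empty list means no deployment is in
# progress; a single-element list describes the in-progress deployment.
deploy_list = software_client.show_deploy()
print("deploy in progress" if deploy_list else "no deploy in progress")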

View File

@ -16,6 +16,7 @@
import hashlib
import os
from cgtsclient import client
from cgtsclient.exc import HTTPBadRequest
from cgtsclient.exc import HTTPConflict
from cgtsclient.exc import HTTPNotFound
@ -129,26 +130,21 @@ class SysinvClient(base.DriverBase):
endpoint_type: str = consts.KS_ENDPOINT_ADMIN,
endpoint: str = None,
):
try:
# TOX cannot import cgts_client and all the dependencies therefore
# the client is being lazy loaded since TOX doesn't actually
# require the cgtsclient module.
from cgtsclient import client
self.region_name = region
# The sysinv client doesn't support a session, so we need to
# get an endpoint and token.
if endpoint is None:
endpoint = session.get_endpoint(
service_type="platform", region_name=region, interface=endpoint_type
)
token = session.get_token()
self.sysinv_client = client.Client(
API_VERSION, endpoint=endpoint, token=token, timeout=timeout
# The sysinv client doesn't support a session, so we need to
# get an endpoint and token.
if not endpoint:
endpoint = session.get_endpoint(
service_type=consts.ENDPOINT_TYPE_PLATFORM,
region_name=region,
interface=endpoint_type,
)
self.region_name = region
except exceptions.ServiceUnavailable:
raise
token = session.get_token()
self.sysinv_client = client.Client(
API_VERSION, endpoint=endpoint, token=token, timeout=timeout
)
def get_host(self, hostname_or_id):
"""Get a host by its hostname or id."""
@ -201,10 +197,6 @@ class SysinvClient(base.DriverBase):
]
return self.sysinv_client.ihost.update(host_id, patch)
def upgrade_host(self, host_id, force=False):
"""Invoke the API for 'system host-upgrade'"""
return self.sysinv_client.ihost.upgrade(host_id, force)
def power_on_host(self, host_id):
"""Power on a host"""
action_value = "power-on"
@ -367,58 +359,10 @@ class SysinvClient(base.DriverBase):
"""Install a license."""
return self.sysinv_client.license.install_license(license_file)
def get_loads(self):
"""Get a list of loads."""
return self.sysinv_client.load.list()
def get_load(self, load_id):
"""Get a particular load."""
return self.sysinv_client.load.get(load_id)
def delete_load(self, load_id):
"""Delete a load with the given id
:param: load id
"""
try:
LOG.info(
"delete_load region {} load_id: {}".format(self.region_name, load_id)
)
self.sysinv_client.load.delete(load_id)
except HTTPNotFound:
LOG.info(
"delete_load NotFound {} for region: {}".format(
load_id, self.region_name
)
)
raise exceptions.LoadNotFound(region_name=self.region_name, load_id=load_id)
except Exception as e:
LOG.error("delete_load exception={}".format(e))
raise e
def import_load(self, path_to_iso, path_to_sig):
"""Import the particular software load."""
try:
return self.sysinv_client.load.import_load(
path_to_iso=path_to_iso, path_to_sig=path_to_sig
)
except HTTPBadRequest as e:
if "Max number of loads" in str(e):
raise exceptions.LoadMaxReached(region_name=self.region_name)
raise
def import_load_metadata(self, load):
"""Import the software load metadata."""
return self.sysinv_client.load.import_load_metadata(load=load)
def get_system_health(self):
"""Get system health."""
return self.sysinv_client.health.get()
def get_system_health_upgrade(self):
"""Get platform upgrade health."""
return self.sysinv_client.health.get_upgrade()
def get_kube_upgrade_health(self):
"""Get system health for kube upgrade."""
return self.sysinv_client.health.get_kube_upgrade()
@ -427,29 +371,6 @@ class SysinvClient(base.DriverBase):
"""Get a list of hosts."""
return self.sysinv_client.ihost.list()
def get_upgrades(self):
"""Get a list of upgrades."""
return self.sysinv_client.upgrade.list()
def get_error_msg(self):
"""Get the upgrade message."""
return self.sysinv_client.upgrade.get_upgrade_msg()
def upgrade_activate(self):
"""Invoke the API for 'system upgrade-activate', which is an update"""
patch = [
{"op": "replace", "path": "/state", "value": "activation-requested"},
]
return self.sysinv_client.upgrade.update(patch)
def upgrade_complete(self):
"""Invoke the API for 'system upgrade-complete', which is a delete"""
return self.sysinv_client.upgrade.delete()
def upgrade_start(self, force=False):
"""Invoke the API for 'system upgrade-start', which is a create"""
return self.sysinv_client.upgrade.create(force)
def get_applications(self):
"""Get a list of containerized applications"""

View File

@ -25,7 +25,7 @@ from dcorch.common.i18n import _
class DCCommonException(Exception):
"""Base Commond Driver Exception.
"""Base Common Driver Exception.
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
@ -87,18 +87,10 @@ class CertificateNotFound(NotFound):
)
class LoadNotFound(NotFound):
message = _("Load in region=%(region_name)s with id %(load_id)s not found")
class LoadNotInVault(NotFound):
message = _("Load at path %(path)s not found")
class LoadMaxReached(Conflict):
message = _("Load in region=%(region_name)s at maximum number of loads")
class PlaybookExecutionFailed(DCCommonException):
message = _("Playbook execution failed, command=%(playbook_cmd)s")

View File

@ -71,8 +71,6 @@ VALID_DEPLOY_STATE = [
consts.DEPLOY_STATE_INSTALL_ABORTED,
consts.DEPLOY_STATE_PRE_INSTALL_FAILED,
consts.DEPLOY_STATE_INSTALLING,
consts.DEPLOY_STATE_DATA_MIGRATION_FAILED,
consts.DEPLOY_STATE_UPGRADE_ACTIVATED,
consts.DEPLOY_STATE_RESTORING,
consts.DEPLOY_STATE_RESTORE_PREP_FAILED,
consts.DEPLOY_STATE_RESTORE_FAILED,

View File

@ -199,10 +199,6 @@ DEPLOY_STATE_CONFIG_ABORTED = "config-aborted"
DEPLOY_STATE_ENROLLED = "enroll-complete"
DEPLOY_STATE_ENROLLING = "enrolling"
DEPLOY_STATE_ENROLL_FAILED = "enroll-failed"
DEPLOY_STATE_MIGRATING_DATA = "migrating-data"
DEPLOY_STATE_DATA_MIGRATION_FAILED = "data-migration-failed"
DEPLOY_STATE_MIGRATED = "migrated"
DEPLOY_STATE_UPGRADE_ACTIVATED = "upgrade-activated"
DEPLOY_STATE_PRE_RESTORE = "pre-restore"
DEPLOY_STATE_RESTORE_PREP_FAILED = "restore-prep-failed"
DEPLOY_STATE_RESTORING = "restoring"
@ -312,26 +308,11 @@ DC_LOG_DIR = "/var/log/dcmanager/"
DC_ANSIBLE_LOG_DIR = DC_LOG_DIR + "ansible"
INVENTORY_FILE_POSTFIX = "_inventory.yml"
# The following password is just a temporary and internal password that is used
# after a remote install as part of the upgrade. The real sysadmin password
# will be restored af the subcloud is re-managed at the end of the upgrade.
TEMP_SYSADMIN_PASSWORD = "St8rlingXCloud*"
# System mode
SYSTEM_MODE_DUPLEX = "duplex"
SYSTEM_MODE_SIMPLEX = "simplex"
SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct"
# Load states
ACTIVE_LOAD_STATE = "active"
INACTIVE_LOAD_STATE = "inactive"
IMPORTING_LOAD_STATE = "importing"
IMPORTED_LOAD_STATE = "imported"
IMPORTED_METADATA_LOAD_STATE = "imported-metadata"
ERROR_LOAD_STATE = "error"
DELETING_LOAD_STATE = "deleting"
IMPORTED_LOAD_STATES = [IMPORTED_LOAD_STATE, IMPORTED_METADATA_LOAD_STATE]
# extra_args for kube upgrade
EXTRA_ARGS_TO_VERSION = "to-version"
# extra_args for kube rootca update
@ -403,8 +384,6 @@ INVALID_DEPLOY_STATES_FOR_DELETE = [
DEPLOY_STATE_ABORTING_INSTALL,
DEPLOY_STATE_ABORTING_BOOTSTRAP,
DEPLOY_STATE_ABORTING_CONFIG,
DEPLOY_STATE_MIGRATING_DATA,
DEPLOY_STATE_UPGRADE_ACTIVATED,
DEPLOY_STATE_PRE_RESTORE,
DEPLOY_STATE_RESTORING,
DEPLOY_STATE_PRE_REHOME,

View File

@ -57,42 +57,15 @@ PRINT_PRESTAGE_VERSIONS_TASK = r"prestage\/prestage-versions : Print prestage ve
PRESTAGE_VERSIONS_KEY_STR = "prestage_versions:"
def _get_system_controller_upgrades():
# get a cached keystone client (and token)
try:
os_client = OpenStackDriver(
region_name=dccommon_consts.SYSTEM_CONTROLLER_NAME, region_clients=None
)
except Exception:
LOG.exception(
"Failed to get keystone client for %s",
dccommon_consts.SYSTEM_CONTROLLER_NAME,
)
raise
ks_client = os_client.keystone_client
sysinv_client = SysinvClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME,
ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint("sysinv"),
)
return sysinv_client.get_upgrades()
def is_system_controller_upgrading():
return len(_get_system_controller_upgrades()) != 0
def global_prestage_validate(payload):
"""Global prestage validation (not subcloud-specific)"""
if is_system_controller_upgrading():
if utils.is_system_controller_deploying():
raise exceptions.PrestagePreCheckFailedException(
subcloud=dccommon_consts.SYSTEM_CONTROLLER_NAME,
details=(
"Prestage operations are not allowed while system "
"controller upgrade is in progress."
"controller has a software deployment in progress."
),
)
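
A minimal caller-side sketch (example only; the payload contents are
illustrative assumptions):

from dcmanager.common import exceptions
from dcmanager.common import prestage

# Hypothetical prestage payload; only the field relevant to this check
# is shown.
payload = {"for_sw_deploy": "true"}

try:
    prestage.global_prestage_validate(payload)
except exceptions.PrestagePreCheckFailedException as exc:
    # Raised when a global pre-check fails, e.g. when the system
    # controller has a software deployment in progress.
    print("Prestage blocked: %s" % exc)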

View File

@ -26,6 +26,7 @@ import resource as sys_resource
import string
import subprocess
from typing import List
from typing import Optional
from typing import Union
import uuid
import xml.etree.ElementTree as ElementTree
@ -1240,26 +1241,6 @@ def get_systemcontroller_installed_releases_ids() -> List[str]:
return get_systemcontroller_deployed_releases(software_list, key="release_id")
def is_software_ready_to_be_prestaged_for_deploy(software_list):
"""Check if software is ready to be prestaged for software deploy
Searches the software list for the status of all releases, whose value
should be deployed.
If at the time of the query any release is in a state other than deployed,
it is considered that the release is not ready to be deployed, regardless of
the specific release.
Args:
software_list (list[dict]): The software list from USM API
Returns:
bool: `True` if all releases are in deployed state, otherwise `False`
"""
return all(release["state"] == software_v1.DEPLOYED for release in software_list)
# TODO(cmondo) - validate the appropriate mechanism for N-1 scenario
def is_software_ready_to_be_prestaged_for_install(software_list, software_version):
"""Check if software is ready to be prestaged for install
@ -1399,14 +1380,6 @@ def get_validated_sw_version_for_prestage(payload, subcloud=None):
"Only base release is deployed, cannot prestage for software deploy."
)
# Ensures that the deploy is not in transition.
# All releases must be in deployed state.
if not is_software_ready_to_be_prestaged_for_deploy(software_list):
return None, (
"All releases must first be deployed, cannot prestage for software "
"deploy."
)
# Ensures that system controller and subcloud have the same
# software version to apply the sw deploy
if subcloud and subcloud_sw_version and subcloud_sw_version != software_version:
@ -1758,25 +1731,6 @@ def decode_and_normalize_passwd(input_passwd):
return passwd
def get_failure_msg(subcloud_region):
try:
os_client = OpenStackDriver(
region_name=subcloud_region,
region_clients=None,
fetch_subcloud_ips=fetch_subcloud_mgmt_ips,
)
keystone_client = os_client.keystone_client
endpoint = keystone_client.endpoint_cache.get_endpoint("sysinv")
sysinv_client = SysinvClient(
subcloud_region, keystone_client.session, endpoint=endpoint
)
msg = sysinv_client.get_error_msg()
return msg
except Exception as e:
LOG.exception("{}: {}".format(subcloud_region, e))
return consts.ERROR_DESC_FAILED
def update_abort_status(context, subcloud_id, deploy_status, abort_failed=False):
"""Update the subcloud deploy status during deploy abort operation.
@ -2254,3 +2208,34 @@ def has_usm_service(subcloud_region, keystone_session):
except keystone_exceptions.EndpointNotFound:
LOG.warning("USM service not found for subcloud_region: %s", subcloud_region)
return False
def get_system_controller_deploy() -> Optional[dict]:
# get a cached keystone client (and token)
try:
os_client = OpenStackDriver(
region_name=dccommon_consts.SYSTEM_CONTROLLER_NAME, region_clients=None
)
except Exception:
LOG.exception(
"Failed to get keystone client for %s",
dccommon_consts.SYSTEM_CONTROLLER_NAME,
)
raise
ks_client = os_client.keystone_client
software_client = software_v1.SoftwareClient(
ks_client.session,
dccommon_consts.SYSTEM_CONTROLLER_NAME,
endpoint=ks_client.endpoint_cache.get_endpoint(
dccommon_consts.ENDPOINT_NAME_USM
),
)
# Show deploy always returns either an empty list when there's no deploy
# or a list with a single element when there's a deploy
deploy_list = software_client.show_deploy()
return deploy_list[0] if deploy_list else None
def is_system_controller_deploying() -> bool:
return get_system_controller_deploy() is not None
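
A hedged illustration of the expected behavior, in the same mock style
as the test updates below (the return values mirror the fixtures used
there; this snippet is an example, not part of the diff):

from unittest import mock

from dcmanager.common import utils as cutils

# No USM deployment in progress: prestage pre-checks may proceed.
with mock.patch.object(
    cutils, "get_system_controller_deploy", return_value=None
):
    assert cutils.is_system_controller_deploying() is False

# USM "show deploy" reports one in-progress deployment: prestage is blocked.
with mock.patch.object(
    cutils, "get_system_controller_deploy", return_value={"to_release": "24.09.0"}
):
    assert cutils.is_system_controller_deploying() is True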

View File

@ -137,7 +137,6 @@ TRANSITORY_STATES = {
consts.DEPLOY_STATE_ABORTING_INSTALL: consts.DEPLOY_STATE_INSTALL_FAILED,
consts.DEPLOY_STATE_ABORTING_BOOTSTRAP: consts.DEPLOY_STATE_BOOTSTRAP_FAILED,
consts.DEPLOY_STATE_ABORTING_CONFIG: consts.DEPLOY_STATE_CONFIG_FAILED,
consts.DEPLOY_STATE_MIGRATING_DATA: consts.DEPLOY_STATE_DATA_MIGRATION_FAILED,
consts.DEPLOY_STATE_PRE_RESTORE: consts.DEPLOY_STATE_RESTORE_PREP_FAILED,
consts.DEPLOY_STATE_RESTORING: consts.DEPLOY_STATE_RESTORE_FAILED,
consts.DEPLOY_STATE_PRE_REHOME: consts.DEPLOY_STATE_REHOME_PREP_FAILED,

View File

@ -2653,7 +2653,7 @@ class TestSubcloudsPatchRedeploy(BaseTestSubcloudsPatch):
]
def test_patch_redeploy_succeeds_without_release_version(self):
"""Test patch redeploy succeeds withou release version"""
"""Test patch redeploy succeeds without release version"""
response = self._send_request()
@ -2884,6 +2884,7 @@ class TestSubcloudsPatchPrestage(BaseTestSubcloudsPatch):
self.mock_sysinv_client_cutils = self.mock_sysinv_client
self._mock_software_client(cutils.software_v1)
self.mock_software_client().show_deploy.return_value = None
self.original_get_validated_sw_version_for_prestage = (
cutils.get_validated_sw_version_for_prestage
)
@ -2895,8 +2896,6 @@ class TestSubcloudsPatchPrestage(BaseTestSubcloudsPatch):
self._setup_mock_get_system_controller_software_list()
def _setup_mock_sysinv_client_prestage(self):
self.mock_sysinv_client_prestage().get_upgrades.return_value = []
mock_get_system = mock.MagicMock()
mock_get_system.system_mode = consts.SYSTEM_MODE_SIMPLEX
self.mock_sysinv_client_prestage().get_system.return_value = mock_get_system
@ -3015,26 +3014,6 @@ class TestSubcloudsPatchPrestage(BaseTestSubcloudsPatch):
"cannot prestage for software deploy.",
)
def test_prestage_for_sw_deploy_fails_with_software_not_ready_to_be_deployed(self):
"""Test prestage for sw deploy fails with software not ready to be deployed"""
self.params["for_sw_deploy"] = "true"
self.mock_get_validated_sw_version_for_prestage.side_effect = (
self.original_get_validated_sw_version_for_prestage
)
self.software_list = self.FAKE_SOFTWARE_LIST_ONE_DEPLOYED_ONE_AVAILABLE_RELEASE
self._setup_mock_get_system_controller_software_list()
response = self._send_request()
self._assert_pecan_and_response(
response,
http.client.BAD_REQUEST,
f"Prestage failed '{self.subcloud.name}': All releases must first be "
"deployed, cannot prestage for software deploy.",
)
def test_prestage_for_sw_deploy_fails_with_invalid_release(self):
"""Test prestage for sw deploy fails with invalid release"""
@ -3260,10 +3239,12 @@ class TestSubcloudsPatchPrestage(BaseTestSubcloudsPatch):
f"Invalid value for force option: {self.params['force']}",
)
def test_patch_prestage_fails_with_system_controller_upgrade(self):
"""Test patch prestage fails with system controller upgrade"""
def test_patch_prestage_fails_with_system_controller_software_deploy(self):
"""Test patch prestage fails when system controller has a deploy in-progress"""
self.mock_sysinv_client_prestage().get_upgrades.return_value = ["upgrade"]
self.mock_software_client().show_deploy.return_value = [
{"to_release": "24.09.0"}
]
response = self._send_request()
@ -3271,7 +3252,7 @@ class TestSubcloudsPatchPrestage(BaseTestSubcloudsPatch):
response,
http.client.BAD_REQUEST,
"Prestage failed 'SystemController': Prestage operations are not "
"allowed while system controller upgrade is in progress.",
"allowed while system controller has a software deployment in progress.",
)
def test_patch_prestage_fails_without_payload(self):

View File

@ -26,6 +26,7 @@ from dcmanager.common import consts
from dcmanager.common import context
from dcmanager.common import exceptions
from dcmanager.common import prestage
from dcmanager.common import utils as cutils
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator import sw_update_manager
@ -570,7 +571,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
self.assertEqual(subcloud_ids[index], strategy_step.subcloud_id)
@mock.patch.object(prestage, "initial_subcloud_validate")
@mock.patch.object(prestage, "_get_system_controller_upgrades")
@mock.patch.object(cutils, "get_system_controller_deploy", return_value=None)
def test_create_sw_prestage_strategy_no_password(
self,
mock_controller_upgrade,
@ -618,7 +619,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
payload=data,
)
@mock.patch.object(prestage, "_get_system_controller_upgrades")
@mock.patch.object(cutils, "get_system_controller_deploy", return_value=None)
def test_create_sw_prestage_strategy_backup_in_progress(
self,
mock_controller_upgrade,
@ -855,7 +856,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
um.create_sw_update_strategy(self.ctxt, payload=data)
@mock.patch.object(prestage, "initial_subcloud_validate")
@mock.patch.object(prestage, "_get_system_controller_upgrades")
@mock.patch.object(cutils, "get_system_controller_deploy", return_value=None)
def test_create_sw_prestage_strategy_parallel(
self,
mock_controller_upgrade,
@ -1322,7 +1323,7 @@ class TestSwUpdateManager(base.DCManagerTestCase):
)
@mock.patch.object(prestage, "_get_prestage_subcloud_info")
@mock.patch.object(prestage, "_get_system_controller_upgrades")
@mock.patch.object(cutils, "get_system_controller_deploy", return_value=None)
def test_create_sw_prestage_strategy_duplex(
self,
mock_controller_upgrade,

View File

@ -28,14 +28,11 @@ from oslo_log import log as logging
from oslo_service.wsgi import Request
from oslo_utils._i18n import _
import psutil
import tsconfig.tsconfig as tsc
import webob.dec
import webob.exc
from webob import Response
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dcmanager.rpc import client as dcmanager_rpc_client
from dcorch.api.proxy.apps.dispatcher import APIDispatcher
from dcorch.api.proxy.apps.proxy import Proxy
@ -432,32 +429,6 @@ class SysinvAPIController(APIController):
request = req
request.body = req.body
# load-import is stored in dc-vault and on /scratch temporary
# folder to be processed by sysinv
if self._is_load_import(request.path):
req_body = self._store_load_to_vault(req)
params_dict = request.POST
try:
# If load import is done with --local, the params active
# and inactive comes from the request body.
# If not done with --local, the params comes from request.POST
# in this case, the decode below will raise an exception
# and params_dict will continue point to request.POST
params_dict = json.loads(request.body.decode("utf-8"))
except UnicodeDecodeError:
pass
if "active" in params_dict:
req_body["active"] = params_dict["active"]
if "inactive" in params_dict:
req_body["inactive"] = params_dict["inactive"]
# sysinv will handle a simple application/json request
# with the file location
req.content_type = "application/json"
req.body = json.dumps(req_body).encode("utf8")
application = self.process_request(req)
response = req.get_response(application)
return self.process_response(environ, request, response)
@ -486,14 +457,7 @@ class SysinvAPIController(APIController):
resource_type = self._get_resource_type_from_environ(environ)
operation_type = proxy_utils.get_operation_type(environ)
if self.get_status_code(response) in self.OK_STATUS_CODE:
if resource_type == consts.RESOURCE_TYPE_SYSINV_LOAD:
if operation_type == consts.OPERATION_TYPE_POST:
new_load = json.loads(response.body)
self._save_load_to_vault(new_load["software_version"])
else:
sw_version = json.loads(response.body)["software_version"]
self._remove_load_from_vault(sw_version)
elif resource_type == consts.RESOURCE_TYPE_SYSINV_DEVICE_IMAGE:
if resource_type == consts.RESOURCE_TYPE_SYSINV_DEVICE_IMAGE:
notify = True
if operation_type == consts.OPERATION_TYPE_POST:
resp = json.loads(response.body)
@ -513,114 +477,11 @@ class SysinvAPIController(APIController):
else:
self._enqueue_work(environ, request, response)
self.notify(environ, self.ENDPOINT_TYPE)
else:
if (
resource_type == consts.RESOURCE_TYPE_SYSINV_LOAD
and operation_type == consts.OPERATION_TYPE_POST
):
self._check_load_in_vault()
return response
finally:
proxy_utils.cleanup(environ)
def _is_load_import(self, path):
return path in proxy_consts.LOAD_PATHS
def _is_active_load(self, sw_version):
if sw_version == tsc.SW_VERSION:
return True
return False
def _save_load_to_vault(self, sw_version):
versioned_vault = os.path.join(proxy_consts.LOAD_VAULT_DIR, sw_version)
try:
# Remove any existing loads in the vault. At this point sysinv has
# validated/added the load so we must match the DC vault to that.
LOG.info("_save_load_to_vault remove prior %s" % sw_version)
self._remove_load_from_vault(sw_version)
if not os.path.isdir(versioned_vault):
# Check if the temporary folder exists
if not os.path.isdir(proxy_consts.LOAD_VAULT_TMP_DIR):
msg = _(
"Failed to store load in vault. Please check "
"dcorch log for details."
)
LOG.error(
"_save_load_to_vault failed: %s does not exist."
% proxy_consts.LOAD_VAULT_TMP_DIR
)
raise webob.exc.HTTPInternalServerError(explanation=msg)
# Check the number of files in the temp folder
load_path = proxy_consts.LOAD_VAULT_TMP_DIR
load_files = [
f
for f in os.listdir(load_path)
if os.path.isfile(os.path.join(load_path, f))
]
if len(load_files) != len(proxy_consts.IMPORT_LOAD_FILES):
msg = _(
"Failed to store load in vault. Please check "
"dcorch log for details."
)
LOG.error("_save_load_to_vault failed to store load in vault")
raise webob.exc.HTTPInsufficientStorage(explanation=msg)
# Move the folder to the final location
shutil.move(proxy_consts.LOAD_VAULT_TMP_DIR, versioned_vault)
LOG.info("Load (%s) saved to vault." % sw_version)
except Exception:
msg = _(
"Failed to store load in vault. Please check dcorch log for details."
)
raise webob.exc.HTTPInsufficientStorage(explanation=msg)
def _remove_load_from_vault(self, sw_version):
versioned_vault = os.path.join(proxy_consts.LOAD_VAULT_DIR, sw_version)
if os.path.isdir(versioned_vault):
shutil.rmtree(versioned_vault)
LOG.info("Load (%s) removed from vault." % sw_version)
def _check_load_in_vault(self):
if not os.path.exists(proxy_consts.LOAD_VAULT_DIR):
# The vault directory has not even been created. This must
# be the very first load-import request which failed.
return
elif len(os.listdir(proxy_consts.LOAD_VAULT_DIR)) == 0:
try:
ks_client = OpenStackDriver(
region_name=dccommon_consts.DEFAULT_REGION_NAME, region_clients=None
).keystone_client
sysinv_client = SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME,
ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint("sysinv"),
)
loads = sysinv_client.get_loads()
except Exception:
# Shouldn't be here
LOG.exception("Failed to get list of loads.")
return
else:
if len(loads) > proxy_consts.IMPORTED_LOAD_MAX_COUNT:
# The previous load regardless of its current state
# was mistakenly imported without the proxy.
msg = _(
"Previous load was not imported in the right "
"region. Please remove the previous load and "
"re-import it using 'SystemController' region."
)
raise webob.exc.HTTPUnprocessableEntity(explanation=msg)
else:
# Remove temp load dir
if os.path.exists(proxy_consts.LOAD_VAULT_TMP_DIR):
shutil.rmtree(proxy_consts.LOAD_VAULT_TMP_DIR)
def _copy_device_image_to_vault(self, src_filepath, dst_filename):
try:
if not os.path.isdir(proxy_consts.DEVICE_IMAGE_VAULT_DIR):
@ -637,24 +498,6 @@ class SysinvAPIController(APIController):
)
raise webob.exc.HTTPInsufficientStorage(explanation=msg)
def _copy_load_to_vault_for_validation(self, src_filepath):
try:
validation_vault_dir = proxy_consts.LOAD_VAULT_TMP_DIR
if not os.path.isdir(validation_vault_dir):
os.makedirs(validation_vault_dir)
load_file_path = os.path.join(
validation_vault_dir, os.path.basename(src_filepath)
)
shutil.copyfile(src_filepath, load_file_path)
LOG.info("copied %s to %s" % (src_filepath, load_file_path))
except Exception as e:
msg = _(
"Failed to store load in vault. Please check "
"dcorch log for more details: %s" % e
)
raise webob.exc.HTTPInsufficientStorage(explanation=msg)
return load_file_path
def _upload_file(self, file_item):
try:
staging_dir = proxy_consts.LOAD_FILES_STAGING_DIR
@ -673,7 +516,7 @@ class SysinvAPIController(APIController):
if source_file is None:
LOG.error(
"Failed to upload load file %s, invalid file object" % staging_file
"Failed to upload file %s, invalid file object" % staging_file
)
return None
@ -693,7 +536,7 @@ class SysinvAPIController(APIController):
avail_space = psutil.disk_usage("/scratch").free
if avail_space < file_size:
LOG.error(
"Failed to upload load file %s, not enough space on /scratch"
"Failed to upload file %s, not enough space on /scratch"
" partition: %d bytes available " % (staging_file, avail_space)
)
return None
@ -710,7 +553,7 @@ class SysinvAPIController(APIController):
except subprocess.CalledProcessError as e:
LOG.error(
"Failed to upload load file %s, /usr/bin/fallocate error: %s"
"Failed to upload file %s, /usr/bin/fallocate error: %s"
% (staging_file, e.output)
)
if os.path.isfile(staging_file):
@ -719,84 +562,11 @@ class SysinvAPIController(APIController):
except Exception:
if os.path.isfile(staging_file):
os.remove(staging_file)
LOG.exception("Failed to upload load file %s" % file_item.filename)
LOG.exception("Failed to upload file %s" % file_item.filename)
return None
return staging_file
def _store_load_to_vault(self, request):
class LocalLoadFile(object):
def __init__(self, filename):
self._filename = filename
self._file = open(filename, "rb")
def __del__(self):
self._file.close()
@property
def filename(self):
return self._filename
@property
def file(self):
return self._file
load_files = dict()
# Flag to cleanup staging files in case of errors
error = True
try:
for file in proxy_consts.IMPORT_LOAD_FILES:
if request.content_type == "application/json":
request_body = dict(json.loads(request.body))
if file not in request_body:
msg = _("Missing required file for %s" % file)
raise webob.exc.HTTPInternalServerError(explanation=msg)
if not os.path.exists(request_body[file]):
msg = _(
"File %s does not exist on the active controller"
% request_body[file]
)
raise webob.exc.HTTPInternalServerError(explanation=msg)
file_item = LocalLoadFile(request_body[file])
else:
if file not in request.POST:
msg = _("Missing required file for %s" % file)
raise webob.exc.HTTPInternalServerError(explanation=msg)
file_item = request.POST[file]
if not file_item.filename:
msg = _("No %s file uploaded" % file)
raise webob.exc.HTTPInternalServerError(explanation=msg)
staging_file = self._upload_file(file_item)
if file in request.POST:
request.POST[file] = staging_file
if staging_file:
self._copy_load_to_vault_for_validation(staging_file)
load_files.update({file: staging_file})
else:
msg = _(
"Failed to save file %s to disk. Please check dcorch "
"logs for details." % file_item.filename
)
raise webob.exc.HTTPInternalServerError(explanation=msg)
LOG.info("Load files: %s saved to disk." % load_files)
error = False
except webob.exc.HTTPInternalServerError:
raise
except Exception as e:
msg = _("Unexpected error copying load to vault: %s" % e)
raise webob.exc.HTTPInternalServerError(explanation=msg)
finally:
if error and os.path.exists(proxy_consts.LOAD_FILES_STAGING_DIR):
shutil.rmtree(proxy_consts.LOAD_FILES_STAGING_DIR)
return load_files
def _store_image_file(self, file_item, dst_filename):
# First, upload file to a temporary location
fn = self._upload_file(file_item)
@ -866,10 +636,6 @@ class SysinvAPIController(APIController):
resource_ids = [str(res.get("signature")) for res in resource]
else:
resource_ids = [resource.get("signature")]
elif resource_type == consts.RESOURCE_TYPE_SYSINV_LOAD:
if operation_type == consts.OPERATION_TYPE_DELETE:
resource_id = json.loads(response.body)["software_version"]
resource_ids = [resource_id]
else:
resource_id = self.get_resource_id_from_link(request_header)
resource_ids = [resource_id]

View File

@ -76,8 +76,6 @@ CERTIFICATE_PATHS = ["/v1/certificate/certificate_install", "/v1/certificate/{uu
USER_PATHS = ["/v1/iuser/{uuid}"]
LOAD_PATHS = ["/v1/loads/import_load", "/v1/loads/{id}"]
RELEASE_PATHS = ["//v1/release", "//v1/release/{rel_id}"]
DEVICE_IMAGE_PATHS = ["/v1/device_images", "/v1/device_images/{uuid}"]
@ -85,7 +83,6 @@ DEVICE_IMAGE_PATHS = ["/v1/device_images", "/v1/device_images/{uuid}"]
SYSINV_PATH_MAP = {
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE: CERTIFICATE_PATHS,
consts.RESOURCE_TYPE_SYSINV_USER: USER_PATHS,
consts.RESOURCE_TYPE_SYSINV_LOAD: LOAD_PATHS,
consts.RESOURCE_TYPE_SYSINV_DEVICE_IMAGE: DEVICE_IMAGE_PATHS,
}
@ -94,8 +91,6 @@ USM_PATH_MAP = {
}
LOAD_FILES_STAGING_DIR = "/scratch/tmp_load"
IMPORT_LOAD_FILES = ["path_to_iso", "path_to_sig"]
IMPORTED_LOAD_MAX_COUNT = 1
DEVICE_IMAGE_VAULT_DIR = "/opt/dc-vault/device_images"
@ -330,7 +325,6 @@ ROUTE_METHOD_MAP = {
dccommon_consts.ENDPOINT_TYPE_PLATFORM: {
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE: ["POST", "DELETE"],
consts.RESOURCE_TYPE_SYSINV_USER: ["PATCH", "PUT"],
consts.RESOURCE_TYPE_SYSINV_LOAD: ["POST", "DELETE"],
consts.RESOURCE_TYPE_SYSINV_DEVICE_IMAGE: ["POST", "PATCH", "DELETE"],
},
consts.ENDPOINT_TYPE_NETWORK: {
@ -370,8 +364,5 @@ ROUTE_METHOD_MAP = {
},
}
LOAD_VAULT_DIR = "/opt/dc-vault/loads"
LOAD_VAULT_TMP_DIR = "/opt/dc-vault/loads/load_tmpdir"
ENDPOINT_TYPE_PATCHING_TMPDIR = "/scratch/patch-api-proxy-tmpdir"
ENDPOINT_TYPE_PLATFORM_TMPDIR = "/scratch/platform-api-proxy-tmpdir"
ENDPOINT_TYPE_USM_TMPDIR = "/scratch/software-upload-tmpdir"

View File

@ -100,12 +100,10 @@ def main():
)
systemd.notify_once()
# For patching and platorm, create a temp directory under /scratch
# For platform and usm, create a temp directory under /scratch
# and set TMPDIR environment variable to this directory, so that
# the file created using tempfile will not use the default directory.
if CONF.type == consts.ENDPOINT_TYPE_PATCHING:
make_tempdir(constants.ENDPOINT_TYPE_PATCHING_TMPDIR)
elif CONF.type == consts.ENDPOINT_TYPE_PLATFORM:
if CONF.type == consts.ENDPOINT_TYPE_PLATFORM:
make_tempdir(constants.ENDPOINT_TYPE_PLATFORM_TMPDIR)
elif CONF.type == consts.ENDPOINT_TYPE_USM:
make_tempdir(constants.ENDPOINT_TYPE_USM_TMPDIR)

View File

@ -34,7 +34,6 @@ ORCH_REQUEST_ABORTED = "aborted"
RESOURCE_TYPE_SYSINV_CERTIFICATE = "certificates"
RESOURCE_TYPE_SYSINV_USER = "iuser"
RESOURCE_TYPE_SYSINV_FERNET_REPO = "fernet_repo"
RESOURCE_TYPE_SYSINV_LOAD = "loads"
RESOURCE_TYPE_USM_RELEASE = "release"
RESOURCE_TYPE_SYSINV_DEVICE_IMAGE = "device_image"