Merge remote-tracking branch 'gerrit/master' into f/centos8

Change-Id: I85724a269314c46969c064ec52ad05ac7fffebd4
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
This commit is contained in:
Shuicheng Lin 2020-05-21 19:50:27 +08:00
commit 539d476456
132 changed files with 6332 additions and 946 deletions


@@ -2036,7 +2036,7 @@ itemNotFound (404)
::
{
"istors":[
{
"function":"osd",
@@ -5721,6 +5721,14 @@ itemNotFound (404)
"links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
"created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
"updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
"needs_firmware_update (Optional)", "plain", "xsd:string", "Indicates whether the device requires a firmware update."
"status (Optional)", "plain", "xsd:string", "The firmware update status of the device."
"root_key (Optional)", "plain", "xsd:string", "The root key of the FPGA device."
"revoked_key_ids (Optional)", "plain", "xsd:string", "The revoked key ids of the FPGA device."
"boot_page (Optional)", "plain", "xsd:string", "The boot page of the FPGA device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the FPGA device."
"bmc_build_version (Optional)", "plain", "xsd:string", "The BMC build version of the FPGA device."
"bmc_fw_version (Optional)", "plain", "xsd:string", "The BMC firmware version of the FPGA device."
::
@@ -6109,7 +6117,47 @@ itemNotFound (404)
"psvendor": "",
"enabled": "False",
"name": "pci_0000_00_0b_0"
-}
+},
{
"links": [
{
"href": "http://192.168.204.1:6385/v1/pci_devices/3ab614a6-3906-4c55-8114-4d78a6dde445",
"rel": "self"
},
{
"href": "http://192.168.204.1:6385/pci_devices/3ab614a6-3906-4c55-8114-4d78a6dde445",
"rel": "bookmark"
}
],
"enabled": true,
"updated_at": "2020-05-04T18:54:03.679744+00:00",
"needs_firmware_update": false,
"bitstream_id": null,
"uuid": "3ab614a6-3906-4c55-8114-4d78a6dde445",
"pdevice": "Device 0b30",
"boot_page": null,
"psvendor": "Intel Corporation",
"psdevice": "Device 0000",
"pclass_id": "120000",
"pvendor": "Intel Corporation",
"status": null,
"sriov_numvfs": 0,
"driver": "intel-fpga-pci",
"bmc_fw_version": null,
"root_key": null,
"host_uuid": "35436a7d-ce05-4e5f-87ac-706fe7513ece",
"bmc_build_version": null,
"name": "pci_0000_b3_00_0",
"revoked_key_ids": null,
"numa_node": 1,
"created_at": "2020-05-04T18:23:34.697710+00:00",
"pdevice_id": "0b30",
"pclass": "Processing accelerators",
"sriov_vfs_pci_address": "",
"sriov_totalvfs": 1,
"pciaddr": "0000:b3:00.0",
"pvendor_id": "8086"
}
]
}
@@ -6310,6 +6358,531 @@ badMediaType (415)
"pvendor_id": "8086"
}
--------------
Device images
--------------
************************
List the device images
************************
.. rest_method:: GET /v1/device_images
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_images (Optional)", "plain", "xsd:list", "The list of device images."
"bitstream_type (Optional)", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor (Optional)", "plain", "xsd:string", "The vendor ID of the pci device."
"pci_device (Optional)", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key signature of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
"links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoked_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels":
{
"key1": "value1",
"key2": "value2"
}
},
{
"uuid": "09100124-5ae9-44d8-aefc-a192b8f27360",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "root-key",
"bitstream_id": null,
"key_signature": "a123",
"revoked_key_id": null,
"name": "Image name",
"description": null,
"image_version": null,
"applied_labels": null
},
{
"uuid": "ef4c39b1-81e9-42dd-b850-06fc8833b47c",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "key-revocation",
"bitstream_id": null,
"key_signature": null,
"revoked_key_id": 123,
"name": "Image name",
"description": null,
"image_version": null,
"applied_labels": null
}
]
}
This operation does not accept a request body.
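
A minimal sketch of calling this endpoint with the Python ``requests``
library; the endpoint address and token are placeholders, not values from
this commit::

    import requests

    SYSINV_URL = "http://192.168.204.1:6385"   # placeholder endpoint
    TOKEN = "<keystone-auth-token>"            # placeholder token

    resp = requests.get(SYSINV_URL + "/v1/device_images",
                        headers={"X-Auth-Token": TOKEN})
    resp.raise_for_status()
    for image in resp.json()["device_images"]:
        print(image["uuid"], image["bitstream_type"])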
**************************************************
Shows attributes of the Device Image object
**************************************************
.. rest_method:: GET /v1/device_images/{image_id}
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_images (Optional)", "plain", "xsd:list", "The list of device images."
"bitstream_type (Optional)", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor (Optional)", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device (Optional)", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
"links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoked_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels":
{
"key1": "value1",
"key2": "value2"
}
}
]
}
************************
Creates a device image
************************
.. rest_method:: POST /v1/device_image
**Normal response codes**
200
**Error response codes**
badMediaType (415)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image. Valid types are ``functional``, ``root-key``, ``key-revocation``"
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image. Required for bitstream type ``functional`` "
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels": null
}
]
}
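
The cgtsclient changes later in this commit stream the image file as a
multipart form field named ``file``; assuming the API accepts that same
form, a hedged sketch of creating a functional image (the endpoint, token,
and file name are placeholders)::

    import requests

    SYSINV_URL = "http://192.168.204.1:6385"   # placeholder
    TOKEN = "<keystone-auth-token>"            # placeholder

    fields = {
        "bitstream_type": "functional",
        "pci_vendor": "8086",
        "pci_device": "0b30",
        "bitstream_id": "1234",
    }
    with open("image.bit", "rb") as f:   # hypothetical bitstream file
        resp = requests.post(SYSINV_URL + "/v1/device_image",
                             headers={"X-Auth-Token": TOKEN},
                             data=fields,
                             files={"file": f})
    print(resp.json())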
************************************************************
Applies the device image to all hosts or to a device label
************************************************************
.. rest_method:: PATCH /v1/device_images/{image_id}?action=apply
**Normal response codes**
200
**Error response codes**
badMediaType (415)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
"device_label (Optional)", "plain", "xsd:string", "The key-value paired device label assigned to a device."
::
{
"key1": "value1"
}
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels":
{
"key1": "value1"
}
}
]
}
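
A sketch of applying an image by device label; the endpoint and token are
the same placeholders as in the earlier sketches, and the label in the
request body selects the target devices::

    import requests

    SYSINV_URL = "http://192.168.204.1:6385"   # placeholder
    TOKEN = "<keystone-auth-token>"            # placeholder

    image_id = "7e794693-2060-4e9e-b0bd-b281b059e8e4"
    resp = requests.patch(
        SYSINV_URL + "/v1/device_images/%s?action=apply" % image_id,
        headers={"X-Auth-Token": TOKEN},
        json={"key1": "value1"})
    print(resp.json())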
************************************************************
Removes the device image from all hosts or a device label
************************************************************
.. rest_method:: PATCH /v1/device_images/{image_id}?action=remove
**Normal response codes**
200
**Error response codes**
badMediaType (415)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
"device_label (Optional)", "plain", "xsd:string", "The key-value paired device label assigned to a device."
::
{
"key1": "value1"
}
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels": null
}
]
}
*****************************
Deletes a device image
*****************************
.. rest_method:: DELETE /v1/device_images/{image_id}
**Normal response codes**
204
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
This operation does not accept a request body.
--------------
Device labels
--------------
************************
List the device labels
************************
.. rest_method:: GET /v1/device_labels
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_labels ", "plain", "xsd:list", "The list of device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
"pcidevice_uuid ", "plain", "csapi:UUID", "The universally unique identifier for the pci device object."
"host_uuid ", "plain", "csapi:UUID", "The universally unique identifier for the host object."
"label_key ", "plain", "xsd:string", "The key of the device label."
"label_value ", "plain", "xsd:string", "The value of the device label."
::
{
"device_labels": [
{
"uuid": "fe26ca98-35d4-43b7-8c51-f0ca957b35e1",
"pcidevice_uuid": "64641c6d-4fdd-4ecb-9c66-a68982267b6d",
"host_uuid": "32be8077-1174-46cf-8309-48c107765ffc",
"label_key": "key1",
"label_value": "value1"
},
{
"uuid": "60342a18-a686-48c4-8e71-13a005ffda1b",
"pcidevice_uuid": "9d69d492-9888-4d85-90d0-e52def926b17",
"host_uuid": "32be8077-1174-46cf-8309-48c107765ffc",
"label_key": "key5",
"label_value": "value5"
}
]
}
*********************************************
Assigns device labels to a pci device
*********************************************
.. rest_method:: POST /v1/device_labels
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"pcidevice_uuid", "URI", "csapi:UUID", "The unique identifier of a pci device."
"device_labels", "URI", "xsd:list", "List of key-value paired of device labels."
::
{
"pcidevice_uuid": "da98f600-49cf-4f0e-b14e-15ef91069fe8",
"key1": "value1",
"key2": "value2"
}
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"uuid", "URI", "csapi:UUID", "The unique identifier of the device label object."
"pcidevice_uuid", "URI", "csapi:UUID", "The unique identifier of a pci device."
"label_key", "URI", "xsd:string", "The label key of device labels."
"label_value", "URI", "xsd:string", "The label value of device labels."
::
{
"device_labels": [
{
"uuid": "66daffb1-72ee-4e6e-9489-206c5eeaec94",
"pcidevice_uuid": "da98f600-49cf-4f0e-b14e-15ef91069fe8",
"label_key": "key1",
"label_value": "value1",
},
{
"uuid": "2e7821ed-e373-4cb8-a47b-f70ff2558dfd",
"pcidevice_uuid": "da98f600-49cf-4f0e-b14e-15ef91069fe8",
"label_key": "key2",
"label_value": "value2",
}
]
}
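
A sketch of assigning labels, again with placeholder endpoint and token;
the body carries the device uuid plus the label key-value pairs::

    import requests

    SYSINV_URL = "http://192.168.204.1:6385"   # placeholder
    TOKEN = "<keystone-auth-token>"            # placeholder

    resp = requests.post(
        SYSINV_URL + "/v1/device_labels",
        headers={"X-Auth-Token": TOKEN},
        json={"pcidevice_uuid": "da98f600-49cf-4f0e-b14e-15ef91069fe8",
              "key1": "value1",
              "key2": "value2"})
    print(resp.json())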
************************
Deletes a device label
************************
.. rest_method:: DELETE /v1/device_labels/{device_label_uuid}
**Normal response codes**
204
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_label_uuid", "URI", "csapi:UUID", "The unique identifier of a device label."
This operation does not accept a request body.
------------------
Service Parameter
------------------


@@ -1,2 +1,2 @@
SRC_DIR="files"
-TIS_PATCH_VER=0
+TIS_PATCH_VER=PKG_GITREVCOUNT


@@ -1,5 +1,5 @@
[Unit]
-Description=General TIS config gate
+Description=General StarlingX config gate
After=sw-patch.service
Before=serial-getty@ttyS0.service getty@tty1.service
# Each config service must have a Before statement against config.service, to ensure ordering


@@ -1,2 +1,2 @@
SRC_DIR="controllerconfig"
-TIS_PATCH_VER=152
+TIS_PATCH_VER=PKG_GITREVCOUNT


@@ -58,6 +58,7 @@ install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/
install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/upgrade_swact_migration.py %{buildroot}%{local_bindir}/upgrade_swact_migration.py
install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -67,7 +68,7 @@ install -p -D -m 755 scripts/controller_config %{buildroot}%{local_etc_initd}/co
# Install Upgrade scripts
install -d -m 755 %{buildroot}%{local_etc_upgraded}
-install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
+# install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
install -d -m 755 %{buildroot}%{local_etc_systemd}
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
@@ -89,7 +90,7 @@ rm -rf $RPM_BUILD_ROOT
%{local_goenabledd}/*
%{local_etc_initd}/*
%dir %{local_etc_upgraded}
-%{local_etc_upgraded}/*
+# %{local_etc_upgraded}/*
%{local_etc_systemd}/*
%package wheels


@@ -18,5 +18,3 @@ KEYRING_WORKDIR = '/tmp/python_keyring'
KEYRING_PERMDIR = tsconfig.KEYRING_PATH
INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'
-BACKUPS_PATH = '/opt/backups'


@@ -23,7 +23,6 @@ import tempfile
import time
import yaml
from sysinv.common import constants as sysinv_constants
@@ -52,6 +51,7 @@ LOG = log.getLogger(__name__)
POSTGRES_MOUNT_PATH = '/mnt/postgresql'
POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump'
DB_CONNECTION_FORMAT = "connection=postgresql://%s:%s@127.0.0.1/%s\n"
DB_BARBICAN_CONNECTION_FORMAT = "postgresql://%s:%s@127.0.0.1/%s"
restore_patching_complete = '/etc/platform/.restore_patching_complete'
restore_compute_ready = '/var/run/.restore_compute_ready'
@@ -103,7 +103,8 @@ def get_db_credentials(shared_services, from_release):
def get_shared_services():
-""" Get the list of shared services from the sysinv database """
+""" Get the list of shared services from the sysinv database"""
shared_services = []
DEFAULT_SHARED_SERVICES = []
@@ -114,6 +115,7 @@ def get_shared_services():
if row is None:
LOG.error("Failed to fetch i_system data")
raise psycopg2.ProgrammingError("Failed to fetch i_system data")
cap_obj = json.loads(row[0])
region_config = cap_obj.get('region_config', None)
if region_config:
@@ -127,7 +129,10 @@ def get_connection_string(db_credentials, database):
""" Generates a connection string for a given database"""
username = db_credentials[database]['username']
password = db_credentials[database]['password']
-return DB_CONNECTION_FORMAT % (username, password, database)
+if database == 'barbican':
+    return DB_BARBICAN_CONNECTION_FORMAT % (username, password, database)
+else:
+    return DB_CONNECTION_FORMAT % (username, password, database)
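# Illustration with made-up credentials: the conf-file form embeds a
# "connection=" key and a trailing newline, while barbican-manage consumes
# a bare SQLAlchemy URL via --db-url:
#   DB_CONNECTION_FORMAT % ("admin", "secret", "sysinv")
#     -> "connection=postgresql://admin:secret@127.0.0.1/sysinv\n"
#   DB_BARBICAN_CONNECTION_FORMAT % ("admin", "secret", "barbican")
#     -> "postgresql://admin:secret@127.0.0.1/barbican"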
def create_temp_filesystem(vgname, lvname, mountpoint, size):
@@ -260,6 +265,50 @@ def migrate_pxeboot_config(from_release, to_release):
raise
def migrate_armada_config(from_release, to_release):
""" Migrates armada configuration. """
LOG.info("Migrating armada config")
devnull = open(os.devnull, 'w')
# Copy the entire armada.cfg directory to pick up any changes made
# after the data was migrated (i.e. updates to the controller-1 load).
source_armada = os.path.join(PLATFORM_PATH, "armada", from_release)
dest_armada = os.path.join(PLATFORM_PATH, "armada", to_release)
try:
subprocess.check_call(
["cp",
"-a",
os.path.join(source_armada),
os.path.join(dest_armada)],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to migrate %s" % source_armada)
raise
def migrate_helm_config(from_release, to_release):
""" Migrates helm configuration. """
LOG.info("Migrating helm config")
devnull = open(os.devnull, 'w')
# Copy the entire helm.cfg directory to pick up any changes made
# after the data was migrated (i.e. updates to the controller-1 load).
source_helm = os.path.join(PLATFORM_PATH, "helm", from_release)
dest_helm = os.path.join(PLATFORM_PATH, "helm", to_release)
try:
subprocess.check_call(
["cp",
"-a",
os.path.join(source_helm),
os.path.join(dest_helm)],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to migrate %s" % source_helm)
raise
def migrate_sysinv_data(from_release, to_release):
""" Migrates sysinv data. """
devnull = open(os.devnull, 'w')
@@ -425,45 +474,44 @@ def create_databases(from_release, to_release, db_credentials):
""" Creates databases. """
LOG.info("Creating new databases")
-if from_release == '18.03':
# Create databases that are new in this release
conn = psycopg2.connect('dbname=postgres user=postgres')
# Postgres won't allow transactions around database create operations
# so we set the connection to autocommit
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
databases_to_create = []
if not databases_to_create:
return
with conn:
with conn.cursor() as cur:
for database in databases_to_create:
print("Creating %s database" % database)
username = psycopg2.extensions.AsIs(
'\"%s\"' % db_credentials[database]['username'])
db_name = psycopg2.extensions.AsIs('\"%s\"' % database)
password = db_credentials[database]['password']
try:
# Here we create the new database and the role for it
# The role will be used by the dbsync command to
# connect to the database. This ensures any new tables
# are added with the correct owner
cur.execute('CREATE DATABASE %s', (db_name,))
cur.execute('CREATE ROLE %s', (username,))
cur.execute('ALTER ROLE %s LOGIN PASSWORD %s',
(username, password))
cur.execute('GRANT ALL ON DATABASE %s TO %s',
(db_name, username))
except Exception as ex:
LOG.exception("Failed to create database and role. " +
"(%s : %s) Exception: %s" %
(database, username, ex))
raise
def migrate_sysinv_database():
@@ -497,15 +545,11 @@ def migrate_databases(from_release, shared_services, db_credentials,
f.write("[database]\n")
f.write(get_connection_string(db_credentials, 'keystone'))
-with open("/etc/barbican/barbican-dbsync.conf", "w") as f:
-f.write("[database]\n")
-f.write(get_connection_string(db_credentials, 'barbican'))
migrate_commands = [
# Migrate barbican
('barbican',
-'barbican-manage --config-file /etc/barbican/barbican-dbsync.conf ' +
-'db upgrade'),
+'barbican-manage db upgrade ' +
+'--db-url %s' % get_connection_string(db_credentials, 'barbican')),
]
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
@@ -616,20 +660,19 @@ def migrate_hiera_data(from_release, to_release):
shutil.copy(os.path.join(from_hiera_path, f), to_hiera_path)
# Make any necessary updates to the static yaml files.
-if from_release == "18.03":
# Update the static.yaml file
static_file = os.path.join(constants.HIERADATA_PERMDIR, "static.yaml")
with open(static_file, 'r') as yaml_file:
static_config = yaml.load(yaml_file)
static_config.update({
'platform::params::software_version': SW_VERSION,
'platform::client::credentials::params::keyring_directory':
KEYRING_PATH,
'platform::client::credentials::params::keyring_file':
os.path.join(KEYRING_PATH, '.CREDENTIAL'),
})
with open(static_file, 'w') as yaml_file:
yaml.dump(static_config, yaml_file, default_flow_style=False)
def upgrade_controller(from_release, to_release):
@@ -667,6 +710,14 @@ def upgrade_controller(from_release, to_release):
print("Migrating pxeboot configuration...")
migrate_pxeboot_config(from_release, to_release)
# Migrate armada config
print("Migrating armada configuration...")
migrate_armada_config(from_release, to_release)
# Migrate helm config
print("Migrating helm configuration...")
migrate_helm_config(from_release, to_release)
# Migrate sysinv data.
print("Migrating sysinv configuration...")
migrate_sysinv_data(from_release, to_release)
@@ -768,6 +819,18 @@ def upgrade_controller(from_release, to_release):
LOG.info("Failed to update hiera configuration")
raise
# Prepare for swact
LOG.info("Prepare for swact to controller-1")
try:
subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
'prepare_swact',
from_release,
to_release],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed upgrade_swact_migration prepare_swact")
raise
print("Shutting down upgrade processes...") print("Shutting down upgrade processes...")
# Stop postgres service # Stop postgres service


@@ -15,7 +15,6 @@ import subprocess
import tsconfig.tsconfig as tsc
-from controllerconfig.common import constants
from sysinv.common import constants as sysinv_constants
from controllerconfig.upgrades import utils
@@ -24,34 +23,21 @@ from oslo_log import log
LOG = log.getLogger(__name__)
-def get_upgrade_databases(shared_services):
+def get_upgrade_databases(system_role, shared_services):
-UPGRADE_DATABASES = ('postgres', 'template1', 'nova', 'sysinv',
-'ceilometer', 'neutron', 'heat', 'nova_api', 'aodh',
-'magnum', 'ironic', 'barbican')
+UPGRADE_DATABASES = ('postgres', 'template1', 'sysinv',
+'barbican')
UPGRADE_DATABASE_SKIP_TABLES = {'postgres': (), 'template1': (),
-'heat': (), 'nova': (), 'nova_api': (),
'sysinv': ('i_alarm',),
-'neutron': (),
-'aodh': (),
-'magnum': (),
-'ironic': (),
-'barbican': (),
-'ceilometer': ('metadata_bool',
-'metadata_float',
-'metadata_int',
-'metadata_text',
-'meter', 'sample', 'fault',
-'resource')}
+'barbican': ()}
-if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
-UPGRADE_DATABASES += ('cinder',)
-UPGRADE_DATABASE_SKIP_TABLES.update({'cinder': ()})
-if sysinv_constants.SERVICE_TYPE_IMAGE not in shared_services:
-UPGRADE_DATABASES += ('glance',)
-UPGRADE_DATABASE_SKIP_TABLES.update({'glance': ()})
+if system_role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
+UPGRADE_DATABASES += ('dcmanager', 'dcorch',)
+UPGRADE_DATABASE_SKIP_TABLES.update({
+'dcmanager': ('subcloud_alarms',),
+'dcorch': ()
+})
if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
UPGRADE_DATABASES += ('keystone',)
@@ -60,12 +46,12 @@ def get_upgrade_databases(shared_services):
return UPGRADE_DATABASES, UPGRADE_DATABASE_SKIP_TABLES
-def export_postgres(dest_dir, shared_services):
+def export_postgres(dest_dir, system_role, shared_services):
""" Export postgres databases """
devnull = open(os.devnull, 'w')
try:
upgrade_databases, upgrade_database_skip_tables = \
-get_upgrade_databases(shared_services)
+get_upgrade_databases(system_role, shared_services)
# Dump roles, table spaces and schemas for databases.
subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' +
'--schema-only > %s/%s' %
@@ -121,7 +107,7 @@ def prepare_upgrade(from_load, to_load, i_system):
# Export databases
shared_services = i_system.capabilities.get("shared_services", "")
-export_postgres(dest_dir, shared_services)
+export_postgres(dest_dir, i_system.distributed_cloud_role, shared_services)
export_vim(dest_dir)
# Export filesystems so controller-1 can access them
@@ -197,9 +183,18 @@ def create_simplex_backup(software_upgrade):
with open(metadata_filename, 'w') as metadata_file:
metadata_file.write(json_data)
-# TODO: Switch this over to use Ansible
-# backup_filename = get_upgrade_backup_filename(software_upgrade)
-# backup_restore.backup(backup_filename, constants.BACKUPS_PATH)
+backup_filename = get_upgrade_backup_filename(software_upgrade)
+backup_vars = "platform_backup_file=%s.tgz backup_dir=%s" % (
+backup_filename, tsc.PLATFORM_BACKUP_PATH)
args = [
'ansible-playbook',
'-e', backup_vars,
sysinv_constants.ANSIBLE_PLATFORM_BACKUP_PLAYBOOK]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
out, _ = proc.communicate()
LOG.info(out)
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, args)
LOG.info("Create simplex backup complete") LOG.info("Create simplex backup complete")
@@ -254,15 +249,16 @@ def abort_upgrade(from_load, to_load, upgrade):
# Remove upgrade directories
upgrade_dirs = [
os.path.join(tsc.PLATFORM_PATH, "config", to_load),
os.path.join(tsc.PLATFORM_PATH, "armada", to_load),
os.path.join(tsc.PLATFORM_PATH, "helm", to_load),
os.path.join(tsc.ETCD_PATH, to_load),
os.path.join(utils.POSTGRES_PATH, "upgrade"),
os.path.join(utils.POSTGRES_PATH, to_load),
os.path.join(utils.RABBIT_PATH, to_load),
-os.path.join(tsc.PLATFORM_PATH, "ironic", to_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", to_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", to_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", to_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", to_load),
-os.path.join(tsc.PLATFORM_PATH, "ceilometer", to_load),
os.path.join(tsc.CONFIG_PATH, 'upgrades')
]
@@ -274,7 +270,7 @@ def abort_upgrade(from_load, to_load, upgrade):
simplex_backup_filename = get_upgrade_backup_filename(upgrade) + "*"
simplex_backup_files = glob.glob(os.path.join(
-constants.BACKUPS_PATH, simplex_backup_filename))
+tsc.PLATFORM_BACKUP_PATH, simplex_backup_filename))
for file in simplex_backup_files:
try:
@@ -328,16 +324,12 @@ def complete_upgrade(from_load, to_load):
os.path.join(utils.POSTGRES_PATH, "upgrade"),
os.path.join(utils.POSTGRES_PATH, from_load),
os.path.join(utils.RABBIT_PATH, from_load),
-os.path.join(tsc.PLATFORM_PATH, "ironic", from_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", from_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", from_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", from_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", from_load),
]
-upgrade_dirs.append(
-os.path.join(tsc.PLATFORM_PATH, "ceilometer", from_load))
for directory in upgrade_dirs:
try:
shutil.rmtree(directory)


@@ -319,6 +319,30 @@ start()
fi
fi
if [ -e $CONFIG_DIR/admin-ep-cert.pem ]
then
cp $CONFIG_DIR/admin-ep-cert.pem /etc/ssl/private/
if [ $? -ne 0 ]
then
fatal_error "Unable to copy $CONFIG_DIR/admin-ep-cert.pem to certificates dir"
fi
fi
if [ -e $CONFIG_DIR/dc-adminep-root-ca.crt ]
then
cp $CONFIG_DIR/dc-adminep-root-ca.crt /etc/pki/ca-trust/source/anchors/
if [ $? -ne 0 ]
then
fatal_error "Unable to copy $CONFIG_DIR/dc-adminep-root-ca.crt to certificates dir"
fi
# Update system trusted CA cert list with the new CA cert.
update-ca-trust extract
if [ $? -ne 0 ]
then
fatal_error "Unable to update system trusted CA certificate list"
fi
fi
if [ -e $CONFIG_DIR/openstack ]
then
if [ ! -e /etc/ssl/private/openstack ]


@@ -0,0 +1,95 @@
#!/usr/bin/python
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will perform upgrade preparation and migration operations for
# host-swact to controller-1.
#

import os
import shutil
import sys
import yaml

from oslo_log import log

LOG = log.getLogger(__name__)

ETCD_PATH = "/opt/etcd"
UPGRADE_CONTROLLER_1_FILE = "/etc/platform/.upgrade_swact_controller_1"


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            action = sys.argv[arg]
        elif arg == 2:
            from_release = sys.argv[arg]
        elif arg == 3:
            to_release = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    if action == "migrate_etcd":
        try:
            migrate_etcd_on_swact()
        except Exception as ex:
            LOG.exception(ex)
            return 1
    elif action == "prepare_swact":
        upgrade_prepare_swact(from_release, to_release)
    return 0


def upgrade_prepare_swact(from_release, to_release):
    migrate_data = {
        'from_release': from_release,
        'to_release': to_release
    }
    with open(UPGRADE_CONTROLLER_1_FILE, 'w') as f:
        yaml.dump(migrate_data, f, default_flow_style=False)


def migrate_etcd_on_swact():
    # Nothing to do if no migration request was recorded; check before
    # opening the file, since opening a missing file raises IOError.
    if not os.path.isfile(UPGRADE_CONTROLLER_1_FILE):
        LOG.info("skipping etcd migration, no request %s" %
                 UPGRADE_CONTROLLER_1_FILE)
        return

    with open(UPGRADE_CONTROLLER_1_FILE, 'r') as f:
        document = yaml.safe_load(f)

    from_release = document.get('from_release')
    to_release = document.get('to_release')

    dest_etcd = os.path.join(ETCD_PATH, to_release)
    if os.path.exists(dest_etcd):
        # The dest_etcd must not have already been created,
        # however this can occur on a forced host-swact
        LOG.info("skipping etcd migration %s already exists" %
                 dest_etcd)
        return

    source_etcd = os.path.join(ETCD_PATH, from_release)
    try:
        shutil.copytree(os.path.join(source_etcd),
                        os.path.join(dest_etcd))
        os.remove(UPGRADE_CONTROLLER_1_FILE)
    except (shutil.Error, OSError):
        # shutil.copytree reports failures as shutil.Error/OSError.
        LOG.exception("Failed to migrate %s" % source_etcd)
        raise


if __name__ == "__main__":
    sys.exit(main())
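
How the script is driven, as a sketch: controllerconfig calls it with a
positional action plus the release pair, as in the upgrade_controller()
hunk above; the release numbers here are illustrative, and the
migrate_etcd call is presumably issued on controller-1 at swact time:

    import subprocess

    # Before the swact: record the migration request on controller-0.
    subprocess.check_call(["/usr/bin/upgrade_swact_migration.py",
                           "prepare_swact", "19.12", "20.06"])

    # After the swact to controller-1: perform the etcd migration.
    subprocess.check_call(["/usr/bin/upgrade_swact_migration.py",
                           "migrate_etcd"])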


@@ -1,133 +0,0 @@
#!/usr/bin/python3
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will remove all neutron bindings from controller-1.
# This is necessary to match the behaviour on controller-1 after
# the host is locked.
# This should be removed once we support data migration upon a
# swact to controller-1 during an upgrade.

import psycopg2
import sys

from psycopg2.extras import RealDictCursor
from oslo_log import log

LOG = log.getLogger(__name__)


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "18.03" and action == "migrate":
        try:
            move_routers_off_controller_1()
            move_networks_off_controller_1()
            move_port_bindings_off_controller_1()
            move_dhcp_port_device_id_off_controller_1()
            move_distributed_port_bindings_off_controller_1()
        except Exception as ex:
            LOG.exception(ex)
            print(ex)
            return 1


def run_cmd_postgres(cmd):
    """
    This executes the given command as user postgres. This is necessary when
    this script is run as root, which is the case on an upgrade activation.
    """
    neutron_conn = psycopg2.connect("dbname=neutron user=postgres")
    with neutron_conn:
        with neutron_conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(cmd)
            LOG.info("Executing '%s'" % cmd)


def move_routers_off_controller_1():
    """
    This function moves all routers hosted on controller-1 to controller-0.
    This is required to match the DB state after controller-1 is locked as
    part of the upgrade, at which point they will be automatically
    rescheduled.
    """
    cmd = ("UPDATE routerl3agentbindings SET l3_agent_id="
           "(SELECT id FROM agents WHERE agent_type='L3 agent'"
           " AND host='controller-0') WHERE l3_agent_id IN"
           " (SELECT id FROM agents WHERE agent_type='L3 agent'"
           " AND host='controller-1') AND (SELECT count(id)"
           " FROM agents WHERE agent_type='L3 agent'"
           " AND host='controller-0')=1;")
    run_cmd_postgres(cmd)


def move_networks_off_controller_1():
    """
    This function moves all dhcp bindings from controller-1 to controller-0.
    This is required to match the DB state after controller-1 is locked as
    part of the upgrade, at which point they will be automatically
    rescheduled.
    """
    cmd = ("UPDATE networkdhcpagentbindings SET dhcp_agent_id="
           "(SELECT id FROM agents WHERE agent_type='DHCP agent'"
           " AND host='controller-0') WHERE dhcp_agent_id IN"
           " (SELECT id FROM agents WHERE agent_type='DHCP agent'"
           " AND host='controller-1') AND (SELECT count(id)"
           " FROM agents WHERE agent_type='DHCP agent'"
           " AND host='controller-0')=1;")
    run_cmd_postgres(cmd)


def move_dhcp_port_device_id_off_controller_1():
    """
    This function updates all dhcp ports' device IDs bound to controller-1
    over to controller-0. Note that because the prefix is based on hostname,
    this prefix is constant for both controllers.
    controller-0: "dhcpaebe17f8-776d-5ab6-9a5f-e9bdeeaca66f"
    controller-1: "dhcpf42f2830-b2ec-5a2c-93f3-e3e3328e20a3"
    """
    cmd = ("UPDATE ports SET device_id ="
           " REPLACE(device_id,"
           " 'dhcpf42f2830-b2ec-5a2c-93f3-e3e3328e20a3',"
           " 'dhcpaebe17f8-776d-5ab6-9a5f-e9bdeeaca66f')"
           " WHERE device_owner = 'network:dhcp';")
    run_cmd_postgres(cmd)


def move_port_bindings_off_controller_1():
    """
    This function moves all port bindings from controller-1 to controller-0.
    """
    cmd = ("UPDATE ml2_port_bindings SET host='controller-0'"
           " WHERE host='controller-1';")
    run_cmd_postgres(cmd)


def move_distributed_port_bindings_off_controller_1():
    """
    This function deletes all ml2_distributed_port_bindings on controller-1.
    """
    cmd = ("DELETE FROM ml2_distributed_port_bindings"
           " WHERE host='controller-1';")
    run_cmd_postgres(cmd)


if __name__ == "__main__":
    sys.exit(main())


@@ -1,104 +0,0 @@
#!/usr/bin/python3
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the storage backends for controller-1.
#

import json
import psycopg2
import sys

from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from oslo_log import log

LOG = log.getLogger(__name__)

# Sections that need to be removed from retired Ceph cache tiering feature
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering'
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_DESIRED = 'cache_tiering.desired'
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_APPLIED = 'cache_tiering.applied'


def main():
    action = None
    from_release = None
    to_release = None  # noqa
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    if from_release == "18.03" and action == "migrate":
        try:
            cleanup_ceph_cache_tiering_service_parameters(from_release)
            cleanup_ceph_personality_subtype(from_release)
        except Exception as ex:
            LOG.exception(ex)
            return 1


def cleanup_ceph_cache_tiering_service_parameters(from_release):
    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            for s in [SERVICE_PARAM_SECTION_CEPH_CACHE_TIER,
                      SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_DESIRED,
                      SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_APPLIED]:
                cur.execute("select * from service_parameter where service=%s "
                            "and section=%s", (constants.SERVICE_TYPE_CEPH,
                                               s,))
                parameters = cur.fetchall()
                if not parameters:
                    LOG.info("No service_parameter data for section %s "
                             "found." % s)
                    continue
                for p in parameters:
                    LOG.debug("Found %s/%s" % (p['section'], p['name']))
                LOG.info("Removing ceph service parameters from section "
                         "%s" % s)
                cur.execute("delete from service_parameter where service=%s "
                            "and section=%s", (constants.SERVICE_TYPE_CEPH,
                                               s,))


def cleanup_ceph_personality_subtype(from_release):
    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("select hostname, capabilities from i_host")
            parameters = cur.fetchall()
            if not parameters:
                LOG.info("No capabilities data found")
                return
            for p in parameters:
                LOG.debug("Found host capabilities %s/%s" %
                          (p['hostname'], p['capabilities']))
                json_dict = json.loads(p['capabilities'])
                if 'pers_subtype' in json_dict:
                    del json_dict['pers_subtype']
                    LOG.info("Removing ceph pers_subtype from capabilities")
                    cur.execute("update i_host set capabilities='%s';" %
                                json.dumps(json_dict))


if __name__ == "__main__":
    sys.exit(main())


@@ -56,6 +56,7 @@ Configuration for the Controller node.
install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/upgrade_swact_migration.py %{buildroot}%{local_bindir}/upgrade_swact_migration.py
install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -65,7 +66,7 @@ install -p -D -m 755 scripts/controller_config %{buildroot}%{local_etc_initd}/co
# Install Upgrade scripts
install -d -m 755 %{buildroot}%{local_etc_upgraded}
-install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
+# install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{_unitdir}/controllerconfig.service
@@ -96,7 +97,7 @@ rm -rf $RPM_BUILD_ROOT
%{local_goenabledd}/*
%{local_etc_initd}/*
%dir %{local_etc_upgraded}
-%{local_etc_upgraded}/*
+# %{local_etc_upgraded}/*
%{_unitdir}/*
#%%package wheels


@@ -1,2 +1,2 @@
SRC_DIR="storageconfig"
-TIS_PATCH_VER=6
+TIS_PATCH_VER=PKG_GITREVCOUNT


@@ -1,2 +1,2 @@
SRC_DIR="cgts-client"
-TIS_PATCH_VER=75
+TIS_PATCH_VER=PKG_GITREVCOUNT


@@ -21,6 +21,7 @@ Requires: python3-keystoneclient
Requires: python3-oslo-i18n
Requires: python3-oslo-serialization
Requires: python3-oslo-utils
Requires: python3-requests-toolbelt
# Needed for python2 and python3 compatible
Requires: python3-six


@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2018 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


@@ -57,6 +57,11 @@ class Manager(object):
'POST', url, body=body, data=data)
return resp
def _upload_multipart(self, url, body, data=None):
resp = self.api.upload_request_with_multipart(
'POST', url, body=body, data=data)
return resp
def _json_get(self, url, body=None):
"""send a GET request and return a json serialized object"""
_, body = self.api.json_request('GET', url, body=body)


@@ -15,13 +15,13 @@
# under the License.
#
-import httplib2
import logging
import os
import requests
from requests_toolbelt import MultipartEncoder
import socket
import httplib2
import six
from six.moves.urllib.parse import urlparse
@@ -293,6 +293,19 @@ class HTTPClient(httplib2.Http):
data=data)
return req.json()
def upload_request_with_multipart(self, method, url, **kwargs):
self.authenticate_and_fetch_endpoint_url()
connection_url = self._get_connection_url(url)
fields = kwargs.get('data')
fields['file'] = (kwargs['body'], open(kwargs['body'], 'rb'))
enc = MultipartEncoder(fields)
headers = {'Content-Type': enc.content_type,
"X-Auth-Token": self.auth_token}
req = requests.post(connection_url,
data=enc,
headers=headers)
return req.json()
#################
# AUTHENTICATE
#################
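
Read together, the two additions form one path: a manager hands a local
file path to _upload_multipart(), and upload_request_with_multipart()
opens that path and streams it as the "file" form field through
MultipartEncoder rather than buffering the whole image in memory. A
sketch of a caller, with a hypothetical manager class and URL:

    # Hypothetical subclass; the device-image manager in this commit
    # would follow this pattern.
    class DeviceImageManager(Manager):
        def upload(self, file_path, data):
            # 'body' is the path on disk; the HTTP layer opens it and
            # streams it as the multipart 'file' field.
            return self._upload_multipart('/v1/device_image', file_path,
                                          data=data)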


@@ -0,0 +1,123 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import copy
import testtools

from cgtsclient.tests import utils
import cgtsclient.v1.controller_fs

CONTROLLER_FS = {
    'uuid': '66666666-7777-8888-9999-000000000000',
    'name': 'cfs',
    'size': 10,
    'logical_volume': 'cfs-lv',
    'replicated': True,
    'state': 'available'
}

UPDATED_CONTROLLER_FS = copy.deepcopy(CONTROLLER_FS)
NEW_SIZE = 20
UPDATED_CONTROLLER_FS['size'] = NEW_SIZE

SYSTEM_UUID = "11111111-2222-3333-4444-5555-000000000000"

fixtures = {
    '/v1/controller_fs':
    {
        'GET': (
            {},
            {"controller_fs": [CONTROLLER_FS]},
        ),
    },
    '/v1/controller_fs/%s' % CONTROLLER_FS['uuid']:
    {
        'GET': (
            {},
            CONTROLLER_FS,
        ),
        'PATCH': (
            {},
            UPDATED_CONTROLLER_FS,
        ),
    },
    '/v1/isystems/%s/controller_fs/update_many' % SYSTEM_UUID:
    {
        'PUT': (
            {},
            {},
        ),
    },
}


class ControllerFsManagerTest(testtools.TestCase):

    def setUp(self):
        super(ControllerFsManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = cgtsclient.v1.controller_fs.ControllerFsManager(self.api)

    def test_controller_fs_list(self):
        controllerfs = self.mgr.list()
        expect = [
            ('GET', '/v1/controller_fs', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(controllerfs), 1)

    def test_controller_fs_show(self):
        controllerfs = self.mgr.get(CONTROLLER_FS['uuid'])
        expect = [
            ('GET', '/v1/controller_fs/%s' % CONTROLLER_FS['uuid'], {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(controllerfs.uuid, CONTROLLER_FS['uuid'])

    def test_controller_fs_update(self):
        patch = [
            {
                'op': 'replace',
                'value': NEW_SIZE,
                'path': '/size'
            },
            {
                'op': 'replace',
                'value': CONTROLLER_FS['name'],
                'path': '/name'
            }
        ]
        controllerfs = self.mgr.update(CONTROLLER_FS['uuid'], patch)
        expect = [
            ('PATCH', '/v1/controller_fs/%s' % CONTROLLER_FS['uuid'], {},
             patch),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(controllerfs.size, NEW_SIZE)

    def test_controller_fs_update_many(self):
        # One patch is a list of two dictionaries;
        # for update_many, this is a list of lists.
        patches = [
            [
                {
                    'op': 'replace',
                    'value': NEW_SIZE,
                    'path': '/size'
                },
                {
                    'op': 'replace',
                    'value': CONTROLLER_FS['name'],
                    'path': '/name'
                }
            ]
        ]
        self.mgr.update_many(SYSTEM_UUID, patches)
        expect = [
            ('PUT', '/v1/isystems/%s/controller_fs/update_many' % SYSTEM_UUID,
             {}, patches),
        ]
        # Since update_many is just a PUT, we don't expect any output from
        # it, so we can't do a proper assert here. We just check that the
        # request made is the one we expected.
        self.assertEqual(self.api.calls, expect)


@@ -0,0 +1,131 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import copy
import mock

from cgtsclient.tests import test_shell
from cgtsclient.v1.controller_fs import ControllerFs
from cgtsclient.v1.isystem import isystem

FAKE_CONTROLLER_FS = {
    'uuid': '66666666-7777-8888-9999-000000000000',
    'name': 'fake',
    'size': 10,
    'logical_volume': 'fake-lv',
    'replicated': True,
    'state': 'available',
    'created_at': None,
    'updated_at': None
}

FAKE_ISYSTEM = {
    'uuid': '11111111-2222-3333-4444-5555-000000000000'
}

MODIFY_CONTROLLER_FS = copy.deepcopy(FAKE_CONTROLLER_FS)
MODIFY_CONTROLLER_FS['size'] = 15
MODIFY_CONTROLLER_FS['state'] = 'drbd_fs_resizing_in_progress'


class ControllerFsTest(test_shell.ShellTest):

    def setUp(self):
        super(ControllerFsTest, self).setUp()

        # Mock the client
        p = mock.patch('cgtsclient.client._get_endpoint')
        self.mock_cgtsclient_client_get_endpoint = p.start()
        self.mock_cgtsclient_client_get_endpoint.return_value = \
            'http://fakelocalhost:6385/v1'
        self.addCleanup(p.stop)

        p = mock.patch('cgtsclient.client._get_ksclient')
        self.mock_cgtsclient_client_get_ksclient = p.start()
        self.addCleanup(p.stop)

        # Mock the ControllerFsManager
        self.controller_fs_manager_list_result = [
            ControllerFs(None, FAKE_CONTROLLER_FS, True)]

        def mock_controller_fs_manager_list(obj):
            return self.controller_fs_manager_list_result
        self.mocked_controller_fs_manager_list = mock.patch(
            'cgtsclient.v1.controller_fs.ControllerFsManager.list',
            mock_controller_fs_manager_list)
        self.mocked_controller_fs_manager_list.start()
        self.addCleanup(self.mocked_controller_fs_manager_list.stop)

        self.controller_fs_manager_get_result = \
            ControllerFs(None, FAKE_CONTROLLER_FS, True)

        def mock_controller_fs_manager_get(obj):
            return self.controller_fs_manager_get_result
        self.mocked_controller_fs_manager_get = mock.patch(
            'cgtsclient.v1.controller_fs.ControllerFsManager.get',
            mock_controller_fs_manager_get)
        self.mocked_controller_fs_manager_get.start()
        self.addCleanup(self.mocked_controller_fs_manager_get.stop)

        def mock_controller_fs_manager_update_many(obj, system_uuid,
                                                   patch_list):
            return None
        self.mocked_controller_fs_manager_update_many = mock.patch(
            'cgtsclient.v1.controller_fs.ControllerFsManager.update_many',
            mock_controller_fs_manager_update_many)
        self.mocked_controller_fs_manager_update_many.start()
        self.addCleanup(self.mocked_controller_fs_manager_update_many.stop)

        # Mock the isystemManager
        self.isystem_manager_list_result = [
            isystem(None, FAKE_ISYSTEM, None)]

        def mock_isystem_manager_list(obj):
            return self.isystem_manager_list_result
        self.mocked_isystem_manager_list = mock.patch(
            'cgtsclient.v1.isystem.isystemManager.list',
            mock_isystem_manager_list)
        self.mocked_isystem_manager_list.start()
        self.addCleanup(self.mocked_isystem_manager_list.stop)

    def test_controller_fs_list(self):
        self.make_env()
        results = self.shell("controllerfs-list --nowrap")
        self.assertIn(str(FAKE_CONTROLLER_FS['uuid']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['name']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['size']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['logical_volume']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['replicated']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['state']), results)

    def test_controller_fs_show(self):
        self.make_env()
        result = self.shell("controllerfs-show fake")
        self.assertIn(str(FAKE_CONTROLLER_FS['uuid']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['name']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['size']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['logical_volume']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['replicated']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['state']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['created_at']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['updated_at']), result)

    def test_controller_fs_modify(self):
        self.make_env()
        self.controller_fs_manager_list_result = [
            ControllerFs(None, MODIFY_CONTROLLER_FS, True)]
        results = self.shell("controllerfs-modify fake=15")
        self.assertIn(str(MODIFY_CONTROLLER_FS['uuid']), results)
        self.assertIn(str(MODIFY_CONTROLLER_FS['name']), results)
self.assertIn(str(MODIFY_CONTROLLER_FS['size']), results)
self.assertIn(str(MODIFY_CONTROLLER_FS['logical_volume']), results)
self.assertIn(str(MODIFY_CONTROLLER_FS['replicated']), results)
self.assertIn(str(MODIFY_CONTROLLER_FS['state']), results)

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
# #
# Copyright (c) 2013-2019 Wind River Systems, Inc. # Copyright (c) 2013-2020 Wind River Systems, Inc.
# #
@ -26,6 +26,9 @@ from cgtsclient.v1 import certificate
from cgtsclient.v1 import cluster from cgtsclient.v1 import cluster
from cgtsclient.v1 import controller_fs from cgtsclient.v1 import controller_fs
from cgtsclient.v1 import datanetwork from cgtsclient.v1 import datanetwork
from cgtsclient.v1 import device_image
from cgtsclient.v1 import device_image_state
from cgtsclient.v1 import device_label
from cgtsclient.v1 import drbdconfig from cgtsclient.v1 import drbdconfig
from cgtsclient.v1 import ethernetport from cgtsclient.v1 import ethernetport
from cgtsclient.v1 import fernet from cgtsclient.v1 import fernet
@ -165,3 +168,6 @@ class Client(http.HTTPClient):
self.kube_version = kube_version.KubeVersionManager(self) self.kube_version = kube_version.KubeVersionManager(self)
self.kube_upgrade = kube_upgrade.KubeUpgradeManager(self) self.kube_upgrade = kube_upgrade.KubeUpgradeManager(self)
self.kube_host_upgrade = kube_host_upgrade.KubeHostUpgradeManager(self) self.kube_host_upgrade = kube_host_upgrade.KubeHostUpgradeManager(self)
self.device_image = device_image.DeviceImageManager(self)
self.device_image_state = device_image_state.DeviceImageStateManager(self)
self.device_label = device_label.DeviceLabelManager(self)

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2013-2017 Wind River Systems, Inc. # Copyright (c) 2013-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -39,6 +39,13 @@ def _print_controller_fs_show(controller_fs):
action='append', action='append',
default=[], default=[],
help="Modify controller filesystem sizes") help="Modify controller filesystem sizes")
@utils.arg('--column',
action='append',
default=[],
help="Specify the column(s) to include, can be repeated")
@utils.arg('--format',
choices=['table', 'yaml', 'value'],
help="specify the output format, defaults to table")
def do_controllerfs_modify(cc, args): def do_controllerfs_modify(cc, args):
"""Modify controller filesystem sizes.""" """Modify controller filesystem sizes."""
@ -59,7 +66,7 @@ def do_controllerfs_modify(cc, args):
except exc.HTTPNotFound: except exc.HTTPNotFound:
raise exc.CommandError('Failed to modify controller filesystems') raise exc.CommandError('Failed to modify controller filesystems')
_print_controllerfs_list(cc) _print_controllerfs_list(cc, args)
@utils.arg('name', @utils.arg('name',
@ -72,15 +79,28 @@ def do_controllerfs_show(cc, args):
_print_controller_fs_show(controller_fs) _print_controller_fs_show(controller_fs)
def _print_controllerfs_list(cc): def _print_controllerfs_list(cc, args):
controller_fs_list = cc.controller_fs.list() controller_fs_list = cc.controller_fs.list()
field_labels = ['UUID', 'FS Name', 'Size in GiB', 'Logical Volume', if args.column:
'Replicated', 'State'] fields = args.column
fields = ['uuid', 'name', 'size', 'logical_volume', 'replicated', 'state'] field_labels = args.column
utils.print_list(controller_fs_list, fields, field_labels, sortby=1) else:
field_labels = ['UUID', 'FS Name', 'Size in GiB', 'Logical Volume',
'Replicated', 'State']
fields = ['uuid', 'name', 'size', 'logical_volume', 'replicated', 'state']
utils.print_list(controller_fs_list, fields, field_labels,
sortby=0, output_format=args.format)
@utils.arg('--column',
action='append',
default=[],
help="Specify the column(s) to include, can be repeated")
@utils.arg('--format',
choices=['table', 'yaml', 'value'],
help="specify the output format, defaults to table")
def do_controllerfs_list(cc, args): def do_controllerfs_list(cc, args):
"""Show list of controller filesystems""" """Show list of controller filesystems"""
_print_controllerfs_list(cc) _print_controllerfs_list(cc, args)
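A sketch of how the new options reach the printer, with hypothetical argparse values:

from argparse import Namespace

# Hypothetical args as the shell's argparse layer would build them.
args = Namespace(column=['name', 'size'], format='yaml')
# With --column given, both fields and labels come from args.column,
# and utils.print_list renders in the format requested by --format.
_print_controllerfs_list(cc, args)  # cc: an authenticated client handle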

View File

@ -0,0 +1,81 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient.common import utils
from cgtsclient import exc
CREATION_ATTRIBUTES = [
'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id',
'name', 'description', 'image_version', 'uuid']
class DeviceImage(base.Resource):
def __repr__(self):
return "<DeviceImage %s>" % self._info
class DeviceImageManager(base.Manager):
resource_class = DeviceImage
@staticmethod
def _path(uuid=None):
return '/v1/device_images/%s' % uuid if uuid else '/v1/device_images'
def list(self):
return self._list(self._path(), "device_images")
def get(self, device_image_id):
try:
return self._list(self._path(device_image_id))[0]
except IndexError:
return None
def create(self, file, **kwargs):
data = {}
for (key, value) in kwargs.items():
if key in CREATION_ATTRIBUTES:
data[key] = value
else:
raise exc.InvalidAttribute('%s' % key)
return self._upload_multipart(self._path(), file, data=data)
def apply(self, device_image_id, labels=None):
return self._update(self._path(device_image_id) + '?action=apply',
labels)
def remove(self, device_image_id, labels=None):
return self._update(self._path(device_image_id) + '?action=remove',
labels)
def delete(self, device_image_id):
return self._delete(self._path(device_image_id))
def _find_device_image(cc, device_image):
if device_image.isdigit() and not utils.is_uuid_like(device_image):
device_image_list = cc.device_image.list()
for n in device_image_list:
if str(n.id) == device_image:
return n
else:
raise exc.CommandError('device image not found: %s' % device_image)
elif utils.is_uuid_like(device_image):
try:
h = cc.device_image.get(device_image)
except exc.HTTPNotFound:
raise exc.CommandError('device image not found: %s' % device_image)
else:
return h
else:
device_image_list = cc.device_image.list()
for n in device_image_list:
if n.name == device_image:
return n
else:
raise exc.CommandError('device image not found: %s' % device_image)
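A usage sketch for the manager above; 'cc' stands in for an authenticated client and all attribute values are assumptions:

# Hypothetical end-to-end use of DeviceImageManager.
response = cc.device_image.create('/tmp/image.bit',
                                  bitstream_type='functional',
                                  pci_vendor='8086',
                                  pci_device='0b30',
                                  bitstream_id='1234')
image = response.get('device_image')
cc.device_image.apply(image['uuid'], {'fpga': 'true'})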

View File

@ -0,0 +1,157 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import utils
from cgtsclient import exc
import os
def _print_device_image_show(obj):
fields = ['uuid', 'bitstream_type',
'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id',
'name', 'description', 'image_version', 'applied_labels']
if type(obj) is dict:
data = [(f, obj.get(f, '')) for f in fields]
else:
data = [(f, getattr(obj, f, '')) for f in fields]
utils.print_tuple_list(data)
@utils.arg('device_image_id',
metavar='<device_image_id>',
help="UUID or name of device_image")
def do_device_image_show(cc, args):
"""Show device image details."""
device_image = cc.device_image.get(args.device_image_id)
_print_device_image_show(device_image)
def do_device_image_list(cc, args):
"""List device images."""
labels = ['uuid', 'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id',
'name', 'description', 'image_version', 'applied_labels']
fields = ['uuid', 'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id',
'name', 'description', 'image_version', 'applied_labels']
device_images = cc.device_image.list()
utils.print_list(device_images, fields, labels, sortby=1)
@utils.arg('bitstream_file',
metavar='<bitstream_file>',
help='Path to the bitstream file [REQUIRED]')
@utils.arg('bitstream_type',
metavar='<bitstream_type>',
choices=['root-key', 'functional', 'key-revocation'],
help="Type of the device image bitstream [REQUIRED]")
@utils.arg('pci_vendor',
metavar='<pci_vendor>',
help="PCI vendor (hexadecimal) of the device image [REQUIRED]")
@utils.arg('pci_device',
metavar='<pci_device>',
help="PCI device (hexadecimal) of the device image [REQUIRED]")
@utils.arg('--bitstream-id',
metavar='<bitstream_id>',
help='Bitstream ID (hexadecimal) of the functional device image')
@utils.arg('--key-signature',
metavar='<key_signature>',
help='Key signature (hexadecimal) of the root-key device image')
@utils.arg('--revoke-key-id',
metavar='<revoke_key_id>',
help='Key ID of the key revocation device image')
@utils.arg('--name',
metavar='<name>',
help='Name of the device image')
@utils.arg('--description',
metavar='<description>',
help='Description of the device image')
@utils.arg('--image-version',
metavar='<version>',
help='Version of the device image')
@utils.arg('-u', '--uuid',
metavar='<uuid>',
help='UUID of the device image')
def do_device_image_create(cc, args):
"""Create a device image."""
if not os.path.isfile(args.bitstream_file):
raise exc.CommandError('Bitstream file does not exist: %s' %
args.bitstream_file)
field_list = ['uuid', 'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id',
'name', 'description', 'image_version']
# Prune input fields down to required/expected values
user_fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
try:
response = cc.device_image.create(args.bitstream_file, **user_fields)
error = response.get('error')
if error:
raise exc.CommandError("%s" % error)
except exc.HTTPNotFound:
raise exc.CommandError(
'Device image not created for %s. No response.' % args.bitstream_file)
except Exception as e:
raise exc.CommandError('Device image not created for %s: %s' %
(args.bitstream_file, e))
else:
device_image = response.get('device_image')
_print_device_image_show(device_image)
@utils.arg('device_image_uuid', metavar='<device_image_uuid>',
help='UUID of the device image')
@utils.arg('attributes',
metavar='<name=value>',
nargs='*',
action='append',
default=[],
help="List of device labels")
def do_device_image_apply(cc, args):
"""Apply the device image"""
attributes = utils.extract_keypairs(args)
try:
response = cc.device_image.apply(args.device_image_uuid,
attributes)
_print_device_image_show(response)
except exc.HTTPNotFound:
raise exc.CommandError('Device image apply failed')
@utils.arg('device_image_uuid', metavar='<device_image_uuid>',
help='UUID of the device image')
@utils.arg('attributes',
metavar='<name=value>',
nargs='*',
action='append',
default=[],
help="List of device labels")
def do_device_image_remove(cc, args):
"""Remove the device image"""
attributes = utils.extract_keypairs(args)
try:
response = cc.device_image.remove(args.device_image_uuid,
attributes)
_print_device_image_show(response)
except exc.HTTPNotFound:
raise exc.CommandError('Device image remove failed')
@utils.arg('device_image_uuid',
metavar='<device_image_uuid>',
help="UUID of device image entry")
def do_device_image_delete(cc, args):
"""Delete a device image."""
cc.device_image.delete(args.device_image_uuid)
print('Deleted device image: %s' % args.device_image_uuid)
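The pruning step in do_device_image_create keeps only known creation attributes and drops unset ones; a minimal sketch with an assumed argparse snapshot:

# Hypothetical vars(args) snapshot after argparse.
vars_args = {'bitstream_file': '/tmp/image.bit',
             'bitstream_type': 'functional',
             'pci_vendor': '8086', 'pci_device': '0b30',
             'bitstream_id': '1234', 'name': None}
field_list = ['uuid', 'bitstream_type', 'pci_vendor', 'pci_device',
              'bitstream_id', 'key_signature', 'revoke_key_id',
              'name', 'description', 'image_version']
user_fields = {k: v for k, v in vars_args.items()
               if k in field_list and v is not None}
# bitstream_file is passed separately; the unset 'name' is dropped.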

View File

@ -0,0 +1,23 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
class DeviceImageState(base.Resource):
def __repr__(self):
return "<DeviceImageState %s>" % self._info
class DeviceImageStateManager(base.Manager):
resource_class = DeviceImageState
@staticmethod
def _path(uuid=None):
return '/v1/device_image_state/%s' % uuid if uuid else '/v1/device_image_state'
def list(self):
return self._list(self._path(), "device_image_state")

View File

@ -0,0 +1,24 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import utils
from cgtsclient.v1 import ihost as ihost_utils
def do_device_image_state_list(cc, args):
"""List image to device mapping with status."""
device_image_state = cc.device_image_state.list()
for d in device_image_state[:]:
pdevice = cc.pci_device.get(d.pcidevice_uuid)
setattr(d, 'pciaddr', getattr(pdevice, 'pciaddr'))
host = ihost_utils._find_ihost(cc, getattr(pdevice, 'host_uuid'))
setattr(d, 'hostname', host.hostname)
labels = ['hostname', 'PCI device address', 'Device image uuid', 'status',
'Update start time', 'updated_at']
fields = ['hostname', 'pciaddr', 'image_uuid', 'status',
'update_start_time', 'updated_at']
utils.print_list(device_image_state, fields, labels, sortby=1)

View File

@ -0,0 +1,40 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient.v1 import options
class DeviceLabel(base.Resource):
def __repr__(self):
return "<DeviceLabel %s>" % self._info
class DeviceLabelManager(base.Manager):
resource_class = DeviceLabel
@staticmethod
def _path(label_id=None):
return '/v1/device_labels/%s' % label_id if label_id else \
'/v1/device_labels'
def list(self):
path = '/v1/device_labels'
return self._list(path, "device_labels")
def get(self, uuid):
path = '/v1/device_labels/%s' % uuid
try:
return self._list(path)[0]
except IndexError:
return None
def assign(self, label, parameters=None):
return self._create(options.build_url(self._path(), q=None,
params=parameters), label)
def remove(self, uuid):
return self._delete(self._path(uuid))

View File

@ -0,0 +1,120 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ihost as ihost_utils
from cgtsclient.v1 import pci_device
def _print_device_label_show(obj):
fields = ['uuid', 'label_key', 'label_value']
data = [(f, getattr(obj, f, '')) for f in fields]
utils.print_tuple_list(data)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host")
@utils.arg('nameorpciaddr',
metavar='<devicename or address>',
help="Name or PCI address of device")
def do_host_device_label_list(cc, args):
"""List device labels"""
host = ihost_utils._find_ihost(cc, args.hostnameorid)
device = pci_device.find_device(cc, host, args.nameorpciaddr)
device_labels = cc.device_label.list()
for dl in device_labels[:]:
if dl.pcidevice_uuid != device.uuid:
device_labels.remove(dl)
else:
setattr(dl, 'hostname', host.hostname)
setattr(dl, 'devicename', device.name)
field_labels = ['hostname', 'PCI device name', 'label key', 'label value']
fields = ['hostname', 'devicename', 'label_key', 'label_value']
utils.print_list(device_labels, fields, field_labels, sortby=1)
def do_device_label_list(cc, args):
"""List all device labels"""
device_labels = cc.device_label.list()
for dl in device_labels[:]:
pci_device = cc.pci_device.get(dl.pcidevice_uuid)
setattr(dl, 'devicename', getattr(pci_device, 'name'))
host = ihost_utils._find_ihost(cc, getattr(pci_device, 'host_uuid'))
setattr(dl, 'hostname', host.hostname)
field_labels = ['hostname', 'PCI device name', 'label key', 'label value']
fields = ['hostname', 'devicename', 'label_key', 'label_value']
utils.print_list(device_labels, fields, field_labels, sortby=1)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host")
@utils.arg('nameorpciaddr',
metavar='<pci name or address>',
help="Name or PCI address of device")
@utils.arg('attributes',
metavar='<name=value>',
nargs='+',
action='append',
default=[],
help="List of device labels")
@utils.arg('--overwrite',
action='store_true',
help="Allow existing label values to be overwritten")
def do_host_device_label_assign(cc, args):
"""Assign a label to a device of a host"""
attributes = utils.extract_keypairs(args)
parameters = ["overwrite=" + str(args.overwrite)]
host = ihost_utils._find_ihost(cc, args.hostnameorid)
device = pci_device.find_device(cc, host, args.nameorpciaddr)
attributes.update({'pcidevice_uuid': device.uuid})
new_device_labels = cc.device_label.assign(attributes, parameters)
for p in new_device_labels.device_labels:
uuid = p['uuid']
if uuid is not None:
try:
device_label = cc.device_label.get(uuid)
except exc.HTTPNotFound:
raise exc.CommandError('Host device label not found: %s' % uuid)
_print_device_label_show(device_label)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host [REQUIRED]")
@utils.arg('nameorpciaddr',
metavar='<pci name or address>',
help="Name or PCI address of device")
@utils.arg('attributes',
metavar='<name>',
nargs='+',
action='append',
default=[],
help="List of device label keys")
def do_host_device_label_remove(cc, args):
"""Remove a device label from a device of a host"""
host = ihost_utils._find_ihost(cc, args.hostnameorid)
device = pci_device.find_device(cc, host, args.nameorpciaddr)
for i in args.attributes[0]:
lbl = _find_host_device_label(cc, host, device, i)
if lbl:
cc.device_label.remove(lbl.uuid)
print('Deleted device label %s for host %s device %s' %
(i, host.hostname, device.name))
def _find_host_device_label(cc, host, device, label):
device_labels = cc.device_label.list()
for lbl in device_labels:
if (lbl.pcidevice_uuid == device.uuid and lbl.label_key == label):
break
else:
lbl = None
print('Host device label not found: host %s, device %s, label key %s' %
(host.hostname, device.name, label))
return lbl
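A sketch of the assign round-trip, mirroring do_host_device_label_assign; the label, device UUID, and client handle are assumptions:

# Hypothetical: assign one label to a device, then show each result.
attributes = {'fpga': 'vista-creek'}
attributes.update({'pcidevice_uuid': '3ab614a6-3906-4c55-8114-4d78a6dde445'})
parameters = ['overwrite=False']
new_labels = cc.device_label.assign(attributes, parameters)  # cc: client
for p in new_labels.device_labels:
    print(p['uuid'])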

View File

@ -9,6 +9,8 @@
from cgtsclient.common import base from cgtsclient.common import base
from cgtsclient import exc from cgtsclient import exc
CREATION_ATTRIBUTES = ['name', 'ihost_uuid', 'size']
class HostFs(base.Resource): class HostFs(base.Resource):
def __repr__(self): def __repr__(self):
@ -41,6 +43,21 @@ class HostFsManager(base.Manager):
if body: if body:
return self.resource_class(self, body) return self.resource_class(self, body)
def delete(self, fs_id):
path = '/v1/host_fs/%s' % fs_id
return self._delete(path)
def create(self, **kwargs):
path = '/v1/host_fs'
new = {}
for (key, value) in kwargs.items():
if key in CREATION_ATTRIBUTES:
new[key] = value
else:
raise exc.InvalidAttribute('%s' % key)
return self._create(path, new)
def _find_fs(cc, ihost, host_fs): def _find_fs(cc, ihost, host_fs):
if host_fs.isdigit(): if host_fs.isdigit():

View File

@ -96,3 +96,56 @@ def do_host_fs_modify(cc, args):
raise exc.CommandError('Failed to modify filesystems') raise exc.CommandError('Failed to modify filesystems')
_print_fs_list(cc, ihost.uuid) _print_fs_list(cc, ihost.uuid)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of the host [REQUIRED]")
@utils.arg('name',
metavar='<fs name>',
help="Name of the Filesystem [REQUIRED]")
def do_host_fs_delete(cc, args):
"""Delete a host filesystem."""
# Get the ihost object
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
host_fs = fs_utils._find_fs(cc, ihost, args.name)
try:
cc.host_fs.delete(host_fs.uuid)
except exc.HTTPNotFound:
raise exc.CommandError('Filesystem delete failed: host %s: '
'name %s' % (args.hostnameorid,
args.name))
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of the host [REQUIRED]")
@utils.arg('name',
metavar='<fs name=size>',
nargs=1,
action='append',
help="Name of the Filesystem [REQUIRED]")
def do_host_fs_add(cc, args):
"""Add a host filesystem"""
fields = {}
# Get the ihost object
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
for attr in args.name[0]:
try:
fs_name, size = attr.split("=", 1)
fields['name'] = fs_name
fields['size'] = size
except ValueError:
raise exc.CommandError('Filesystem creation attributes must be '
'FS_NAME=SIZE not "%s"' % attr)
try:
fields['ihost_uuid'] = ihost.uuid
fs = cc.host_fs.create(**fields)
except exc.HTTPNotFound:
raise exc.CommandError('Failed to create filesystem: host %s: fields %s' %
(args.hostnameorid, fields))
_print_fs_show(fs)
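The FS_NAME=SIZE parsing reduces to one split on the first '='; a minimal sketch with an assumed attribute:

# Hypothetical: parse a single 'name=size' pair as do_host_fs_add does.
attr = 'scratch=10'
fs_name, size = attr.split('=', 1)
fields = {'name': fs_name, 'size': size}  # size remains a string here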

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2013-2019 Wind River Systems, Inc. # Copyright (c) 2013-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -39,7 +39,8 @@ def _print_ihost_show(ihost, columns=None, output_format=None):
'boot_device', 'rootfs_device', 'install_output', 'console', 'boot_device', 'rootfs_device', 'install_output', 'console',
'tboot', 'vim_progress_status', 'software_load', 'tboot', 'vim_progress_status', 'software_load',
'install_state', 'install_state_info', 'inv_state', 'install_state', 'install_state_info', 'inv_state',
'clock_synchronization'] 'clock_synchronization',
'device_image_update', 'reboot_needed']
optional_fields = ['vsc_controllers', 'ttys_dcd'] optional_fields = ['vsc_controllers', 'ttys_dcd']
if ihost.subfunctions != ihost.personality: if ihost.subfunctions != ihost.personality:
fields.append('subfunctions') fields.append('subfunctions')
@ -848,3 +849,31 @@ def do_kube_host_upgrade(cc, args):
data = dict(data_list) data = dict(data_list)
ordereddata = OrderedDict(sorted(data.items(), key=lambda t: t[0])) ordereddata = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
utils.print_dict(ordereddata, wrap=72) utils.print_dict(ordereddata, wrap=72)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host")
def do_host_device_image_update(cc, args):
"""Update device image on a host."""
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
try:
host = cc.ihost.device_image_update(ihost.uuid)
except exc.HTTPNotFound:
raise exc.CommandError(
'Device image update failed: host %s' % args.hostnameorid)
_print_ihost_show(host)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help="Name or ID of host")
def do_host_device_image_update_abort(cc, args):
"""Abort device image update on a host."""
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
try:
host = cc.ihost.device_image_update_abort(ihost.uuid)
except exc.HTTPNotFound:
raise exc.CommandError(
'Device image update-abort failed: host %s' % args.hostnameorid)
_print_ihost_show(host)

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2013-2019 Wind River Systems, Inc. # Copyright (c) 2013-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -138,6 +138,16 @@ class ihostManager(base.Manager):
body=post_body) body=post_body)
return self.resource_class(self, body) return self.resource_class(self, body)
def device_image_update(self, hostid):
path = self._path(hostid) + "/device_image_update"
resp, body = self.api.json_request('POST', path)
return self.resource_class(self, body)
def device_image_update_abort(self, hostid):
path = self._path(hostid) + "/device_image_update_abort"
resp, body = self.api.json_request('POST', path)
return self.resource_class(self, body)
def _find_ihost(cc, ihost): def _find_ihost(cc, ihost):
if ihost.isdigit() or utils.is_uuid_like(ihost): if ihost.isdigit() or utils.is_uuid_like(ihost):
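Both new manager calls are bare POSTs with no request body; a sketch of the resulting request, with an assumed host id:

# Hypothetical wire-level equivalent of device_image_update(hostid):
#   POST <sysinv endpoint>/v1/ihosts/<hostid>/device_image_update
hostid = 'controller-0'  # assumed host id
path = '/v1/ihosts/%s/device_image_update' % hostid
# the manager then does: resp, body = self.api.json_request('POST', path)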

View File

@ -1,13 +1,11 @@
# #
# Copyright (c) 2015 Wind River Systems, Inc. # Copyright (c) 2015-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
# -*- encoding: utf-8 -*-
#
from cgtsclient.common import base from cgtsclient.common import base
from cgtsclient import exc
class PciDevice(base.Resource): class PciDevice(base.Resource):
@ -43,3 +41,13 @@ def get_pci_device_display_name(p):
return p.name return p.name
else: else:
return '(' + str(p.uuid)[-8:] + ')' return '(' + str(p.uuid)[-8:] + ')'
def find_device(cc, host, nameorpciaddr):
devices = cc.pci_device.list(host.uuid)
for d in devices:
if d.name == nameorpciaddr or d.pciaddr == nameorpciaddr:
return d
else:
raise exc.CommandError('PCI device not found: host %s device %s' %
(host.hostname, nameorpciaddr))
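find_device leans on Python's for/else: the else clause runs only when the loop finishes without hitting a match; a minimal illustration with assumed data:

# Hypothetical: for/else raises only when no device matched.
devices = [{'name': 'pci_0000_b3_00_0', 'pciaddr': '0000:b3:00.0'}]
for d in devices:
    if 'pci_0000_b3_00_0' in (d['name'], d['pciaddr']):
        print('found', d['name'])
        break
else:
    raise LookupError('PCI device not found')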

View File

@ -1,18 +1,16 @@
# #
# Copyright (c) 2015 Wind River Systems, Inc. # Copyright (c) 2015-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
from cgtsclient.common import utils from cgtsclient.common import utils
from cgtsclient import exc from cgtsclient import exc
from cgtsclient.v1 import ihost as ihost_utils from cgtsclient.v1 import ihost as ihost_utils
# PCI Device Class ID in hexadecimal string
PCI_DEVICE_CLASS_FPGA = '120000'
def _print_device_show(device): def _print_device_show(device):
fields = ['name', 'pciaddr', 'pclass_id', 'pvendor_id', 'pdevice_id', fields = ['name', 'pciaddr', 'pclass_id', 'pvendor_id', 'pdevice_id',
@ -26,6 +24,15 @@ def _print_device_show(device):
'sriov_vfs_pci_address', 'extra_info', 'created_at', 'sriov_vfs_pci_address', 'extra_info', 'created_at',
'updated_at'] 'updated_at']
pclass_id = getattr(device, 'pclass_id')
if pclass_id == PCI_DEVICE_CLASS_FPGA:
fields += ['needs_firmware_update', 'status', 'root_key',
'revoked_key_ids', 'boot_page', 'bitstream_id',
'bmc_build_version', 'bmc_fw_version']
labels += ['needs_firmware_update', 'status', 'root_key',
'revoked_key_ids', 'boot_page', 'bitstream_id',
'bmc_build_version', 'bmc_fw_version']
data = [(f, getattr(device, f, '')) for f in fields] data = [(f, getattr(device, f, '')) for f in fields]
utils.print_tuple_list(data, labels) utils.print_tuple_list(data, labels)

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2013-2019 Wind River Systems, Inc. # Copyright (c) 2013-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -14,6 +14,9 @@ from cgtsclient.v1 import certificate_shell
from cgtsclient.v1 import cluster_shell from cgtsclient.v1 import cluster_shell
from cgtsclient.v1 import controller_fs_shell from cgtsclient.v1 import controller_fs_shell
from cgtsclient.v1 import datanetwork_shell from cgtsclient.v1 import datanetwork_shell
from cgtsclient.v1 import device_image_shell
from cgtsclient.v1 import device_image_state_shell
from cgtsclient.v1 import device_label_shell
from cgtsclient.v1 import drbdconfig_shell from cgtsclient.v1 import drbdconfig_shell
from cgtsclient.v1 import ethernetport_shell from cgtsclient.v1 import ethernetport_shell
from cgtsclient.v1 import health_shell from cgtsclient.v1 import health_shell
@ -123,6 +126,9 @@ COMMAND_MODULES = [
host_fs_shell, host_fs_shell,
kube_version_shell, kube_version_shell,
kube_upgrade_shell, kube_upgrade_shell,
device_image_shell,
device_image_state_shell,
device_label_shell,
] ]

View File

@ -3,3 +3,4 @@ keyring
oslo.i18n # Apache-2.0 oslo.i18n # Apache-2.0
oslo.serialization>=1.10.0,!=2.19.1 # Apache-2.0 oslo.serialization>=1.10.0,!=2.19.1 # Apache-2.0
oslo.utils>=3.5.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0
requests-toolbelt

View File

@ -2,9 +2,10 @@
# of appearance. Changing the order has an impact on the overall integration # of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later. # process, which may cause wedges in the gate later.
# Hacking already pins down pep8, pyflakes and flake8 # Newer hacking already pins down pep8, pyflakes and flake8
flake8<3.8.0
pycodestyle<2.6.0 # MIT License
hacking>=1.1.0,<=2.0.0 # Apache-2.0 hacking>=1.1.0,<=2.0.0 # Apache-2.0
pycodestyle>=2.0.0 # MIT License
bandit>=1.1.0 # Apache-2.0 bandit>=1.1.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD fixtures>=3.0.0 # Apache-2.0/BSD

View File

@ -1,4 +1,4 @@
SRC_DIR="." SRC_DIR="."
COPY_LIST_TO_TAR="LICENSE sysinv-agent sysinv-agent.conf" COPY_LIST_TO_TAR="LICENSE sysinv-agent sysinv-agent.conf"
EXCLUDE_LIST_FROM_TAR="centos sysinv-agent.bb" EXCLUDE_LIST_FROM_TAR="centos sysinv-agent.bb"
TIS_PATCH_VER=6 TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -1,4 +1,4 @@
Summary: CGCS Host Inventory Init Package Summary: StarlingX Host Inventory Init Package
Name: sysinv-agent Name: sysinv-agent
Version: 1.0 Version: 1.0
Release: %{tis_patch_ver}%{?_tis_dist} Release: %{tis_patch_ver}%{?_tis_dist}
@ -11,7 +11,7 @@ Source0: %{name}-%{version}.tar.gz
BuildRequires: systemd-devel BuildRequires: systemd-devel
%description %description
CGCS Host Inventory Init Package StarlingX Inventory Init Package
%define local_etc_initd /etc/init.d/ %define local_etc_initd /etc/init.d/
%define local_etc_pmond /etc/pmon.d/ %define local_etc_pmond /etc/pmon.d/

View File

@ -1,5 +1,5 @@
[Unit] [Unit]
Description=Titanium Cloud System Inventory Agent Description=StarlingX System Inventory Agent
After=nfscommon.service sw-patch.service After=nfscommon.service sw-patch.service
After=network-online.target systemd-udev-settle.service After=network-online.target systemd-udev-settle.service
Before=pmon.service Before=pmon.service

View File

@ -1,2 +1,2 @@
SRC_DIR="sysinv" SRC_DIR="sysinv"
TIS_PATCH_VER=345 TIS_PATCH_VER=PKG_GITREVCOUNT

View File

@ -91,7 +91,6 @@ systemconfig.helm_plugins.stx_monitor =
008_kube-state-metrics = sysinv.helm.kube_state_metrics:KubeStateMetricsHelm 008_kube-state-metrics = sysinv.helm.kube_state_metrics:KubeStateMetricsHelm
009_nginx-ingress = sysinv.helm.nginx_ingress:NginxIngressHelm 009_nginx-ingress = sysinv.helm.nginx_ingress:NginxIngressHelm
010_logstash = sysinv.helm.logstash:LogstashHelm 010_logstash = sysinv.helm.logstash:LogstashHelm
011_monitor_version_check = sysinv.helm.monitor_version_check:StxMonitorVersionCheckHelm
systemconfig.helm_plugins.stx_openstack = systemconfig.helm_plugins.stx_openstack =
001_ingress = sysinv.helm.ingress:IngressHelm 001_ingress = sysinv.helm.ingress:IngressHelm
@ -120,9 +119,8 @@ systemconfig.helm_plugins.stx_openstack =
024_ironic = sysinv.helm.ironic:IronicHelm 024_ironic = sysinv.helm.ironic:IronicHelm
025_placement = sysinv.helm.placement:PlacementHelm 025_placement = sysinv.helm.placement:PlacementHelm
026_nginx-ports-control = sysinv.helm.nginx_ports_control:NginxPortsControlHelm 026_nginx-ports-control = sysinv.helm.nginx_ports_control:NginxPortsControlHelm
027_version_check = sysinv.helm.openstack_version_check:StxOpenstackVersionCheckHelm 027_fm-rest-api = sysinv.helm.fm_rest_api:FmRestApiHelm
028_fm-rest-api = sysinv.helm.fm_rest_api:FmRestApiHelm 028_dcdbsync = sysinv.helm.dcdbsync:DcdbsyncHelm
029_dcdbsync = sysinv.helm.dcdbsync:DcdbsyncHelm
systemconfig.armada.manifest_ops = systemconfig.armada.manifest_ops =
generic = sysinv.helm.manifest_generic:GenericArmadaManifestOperator generic = sysinv.helm.manifest_generic:GenericArmadaManifestOperator

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2018-2019 Wind River Systems, Inc. # Copyright (c) 2018-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #

View File

@ -1,5 +1,5 @@
# #
# Copyright (c) 2013-2019 Wind River Systems, Inc. # Copyright (c) 2013-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -28,6 +28,9 @@ from sysinv.api.controllers.v1 import cluster
from sysinv.api.controllers.v1 import community from sysinv.api.controllers.v1 import community
from sysinv.api.controllers.v1 import controller_fs from sysinv.api.controllers.v1 import controller_fs
from sysinv.api.controllers.v1 import cpu from sysinv.api.controllers.v1 import cpu
from sysinv.api.controllers.v1 import device_image
from sysinv.api.controllers.v1 import device_image_state
from sysinv.api.controllers.v1 import device_label
from sysinv.api.controllers.v1 import disk from sysinv.api.controllers.v1 import disk
from sysinv.api.controllers.v1 import datanetwork from sysinv.api.controllers.v1 import datanetwork
from sysinv.api.controllers.v1 import interface_datanetwork from sysinv.api.controllers.v1 import interface_datanetwork
@ -261,6 +264,15 @@ class V1(base.APIBase):
kube_host_upgrades = [link.Link] kube_host_upgrades = [link.Link]
"Links to the kube_host_upgrade resource" "Links to the kube_host_upgrade resource"
device_images = [link.Link]
"Links to the device images resource"
device_image_state = [link.Link]
"Links to the device image state resource"
device_labels = [link.Link]
"Links to the device labels resource"
@classmethod @classmethod
def convert(self): def convert(self):
v1 = V1() v1 = V1()
@ -809,6 +821,26 @@ class V1(base.APIBase):
'kube_host_upgrades', '', 'kube_host_upgrades', '',
bookmark=True)] bookmark=True)]
v1.device_images = [link.Link.make_link('self', pecan.request.host_url,
'device_images', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'device_images', '',
bookmark=True)]
v1.device_image_state = [link.Link.make_link('self', pecan.request.host_url,
'device_image_state', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'device_image_state', '',
bookmark=True)]
v1.device_labels = [link.Link.make_link('self', pecan.request.host_url,
'device_labels', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'device_labels', '',
bookmark=True)]
return v1 return v1
@ -880,6 +912,9 @@ class Controller(rest.RestController):
kube_versions = kube_version.KubeVersionController() kube_versions = kube_version.KubeVersionController()
kube_upgrade = kube_upgrade.KubeUpgradeController() kube_upgrade = kube_upgrade.KubeUpgradeController()
kube_host_upgrades = kube_host_upgrade.KubeHostUpgradeController() kube_host_upgrades = kube_host_upgrade.KubeHostUpgradeController()
device_images = device_image.DeviceImageController()
device_image_state = device_image_state.DeviceImageStateController()
device_labels = device_label.DeviceLabelController()
@wsme_pecan.wsexpose(V1) @wsme_pecan.wsexpose(V1)
def get(self): def get(self):

View File

@ -1,4 +1,4 @@
# Copyright (c) 2013-2015 Wind River Systems, Inc. # Copyright (c) 2013-2020 Wind River Systems, Inc.
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
# #
@ -281,11 +281,6 @@ def check_core_allocations(host, cpu_counts):
elif total_platform_cores == 0: elif total_platform_cores == 0:
raise wsme.exc.ClientSideError("%s must have at least one core." % raise wsme.exc.ClientSideError("%s must have at least one core." %
constants.PLATFORM_FUNCTION) constants.PLATFORM_FUNCTION)
for s in range(1, len(host.nodes)):
if cpu_counts[s][constants.PLATFORM_FUNCTION] > 0:
raise wsme.exc.ClientSideError(
"%s cores can only be allocated on Processor 0" %
constants.PLATFORM_FUNCTION)
# Validate shared cores # Validate shared cores
for s in range(0, len(host.nodes)): for s in range(0, len(host.nodes)):
@ -313,34 +308,18 @@ def check_core_allocations(host, cpu_counts):
"The %s function can only be assigned up to %s cores." % "The %s function can only be assigned up to %s cores." %
(constants.VSWITCH_FUNCTION.lower(), VSWITCH_MAX_CORES)) (constants.VSWITCH_FUNCTION.lower(), VSWITCH_MAX_CORES))
# Validate Isolated cores # Validate Isolated cores:
# We can allocate platform cores on numa 0, otherwise all isolated # - Prevent isolated core assignment if vswitch or shared cores are
# cores must in a contiguous block after the platform cores. # allocated.
if total_isolated_cores > 0: if total_isolated_cores > 0:
if total_vswitch_cores != 0 or total_shared_cores != 0: if total_vswitch_cores != 0 or total_shared_cores != 0:
raise wsme.exc.ClientSideError( raise wsme.exc.ClientSideError(
"%s cores can only be configured with %s and %s core types." % "%s cores can only be configured with %s and %s core types." %
(constants.ISOLATED_FUNCTION, constants.PLATFORM_FUNCTION, (constants.ISOLATED_FUNCTION, constants.PLATFORM_FUNCTION,
constants.APPLICATION_FUNCTION)) constants.APPLICATION_FUNCTION))
has_application_cpus = False
for s in range(0, len(host.nodes)):
numa_counts = cpu_counts[s]
isolated_cores_requested = \
numa_counts[constants.ISOLATED_FUNCTION]
if has_application_cpus and isolated_cores_requested:
raise wsme.exc.ClientSideError(
"%s and %s cpus must be contiguous" %
(constants.PLATFORM_FUNCTION, constants.ISOLATED_FUNCTION))
platform_cores_requested = \
numa_counts[constants.PLATFORM_FUNCTION]
available_cores = len(host.cpu_lists[s])
if platform_cores_requested + isolated_cores_requested \
!= available_cores:
has_application_cpus = True
reserved_for_applications = len(host.cpus) - total_platform_cores - \ reserved_for_applications = len(host.cpus) - total_platform_cores - \
total_vswitch_cores total_vswitch_cores - total_isolated_cores
if reserved_for_applications <= 0: if reserved_for_applications <= 0:
raise wsme.exc.ClientSideError( raise wsme.exc.ClientSideError(
"There must be at least one unused core for %s." % "There must be at least one unused core for %s." %

View File

@ -0,0 +1,478 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import pecan
from pecan import expose
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from oslo_log import log
from sysinv._i18n import _
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import device as dconstants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
LOG = log.getLogger(__name__)
ALLOWED_BITSTREAM_TYPES = [
dconstants.BITSTREAM_TYPE_ROOT_KEY,
dconstants.BITSTREAM_TYPE_FUNCTIONAL,
dconstants.BITSTREAM_TYPE_KEY_REVOCATION,
]
class DeviceImagePatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class DeviceImage(base.APIBase):
"""API representation of a device_image.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
a device image.
"""
id = int
"Unique ID for this device_image"
uuid = types.uuid
"Unique UUID for this device_image"
bitstream_type = wtypes.text
"The bitstream type of the device image"
pci_vendor = wtypes.text
"The vendor ID of the pci device"
pci_device = wtypes.text
"The device ID of the pci device"
bitstream_id = wtypes.text
"The bitstream id of the functional device image"
key_signature = wtypes.text
"The key signature of the root-key device image"
revoke_key_id = int
"The key revocation id of the key revocation device image"
name = wtypes.text
"The name of the device image"
description = wtypes.text
"The description of the device image"
image_version = wtypes.text
"The version of the device image"
applied = bool
"Represent current status: created or applied"
applied_labels = types.MultiType({dict})
"Represent a list of key-value pair of labels"
def __init__(self, **kwargs):
self.fields = list(objects.device_image.fields.keys())
for k in self.fields:
setattr(self, k, kwargs.get(k))
# API-only attribute
self.fields.append('action')
setattr(self, 'action', kwargs.get('action', None))
# 'applied_labels' is not part of the object.device_image.fields
# (it is an API-only attribute)
self.fields.append('applied_labels')
setattr(self, 'applied_labels', kwargs.get('applied_labels', None))
@classmethod
def convert_with_links(cls, rpc_device_image, expand=True):
device_image = DeviceImage(**rpc_device_image.as_dict())
if not expand:
device_image.unset_fields_except(
['id', 'uuid', 'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id',
'name', 'description', 'image_version', 'applied_labels'])
# insert applied labels for this device image if they exist
device_image = _get_applied_labels(device_image)
# do not expose the id attribute
device_image.id = wtypes.Unset
return device_image
def _validate_bitstream_type(self):
if self.bitstream_type not in ALLOWED_BITSTREAM_TYPES:
raise ValueError(_("Bitstream type %s not supported") %
self.bitstream_type)
def validate_syntax(self):
"""
Validates the syntax of each field.
"""
self._validate_bitstream_type()
class DeviceImageCollection(collection.Collection):
"""API representation of a collection of device_image."""
device_images = [DeviceImage]
"A list containing device_image objects"
def __init__(self, **kwargs):
self._type = 'device_images'
@classmethod
def convert_with_links(cls, rpc_device_images, limit, url=None,
expand=False, **kwargs):
collection = DeviceImageCollection()
collection.device_images = [DeviceImage.convert_with_links(p, expand)
for p in rpc_device_images]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
def _get_applied_labels(device_image):
if not device_image:
return device_image
image_labels = pecan.request.dbapi.device_image_label_get_by_image(
device_image.id)
if image_labels:
applied_labels = {}
for image_label in image_labels:
label = pecan.request.dbapi.device_label_get(image_label.label_uuid)
applied_labels[label.label_key] = label.label_value
device_image.applied_labels = applied_labels
return device_image
LOCK_NAME = 'DeviceImageController'
class DeviceImageController(rest.RestController):
"""REST controller for device_image."""
def __init__(self, parent=None, **kwargs):
self._parent = parent
def _get_device_image_collection(
self, marker=None, limit=None, sort_key=None,
sort_dir=None, expand=False, resource_url=None):
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.device_image.get_by_uuid(
pecan.request.context,
marker)
deviceimages = pecan.request.dbapi.deviceimages_get_all(
limit=limit, marker=marker_obj,
sort_key=sort_key, sort_dir=sort_dir)
return DeviceImageCollection.convert_with_links(
deviceimages, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
def _get_one(self, deviceimage_uuid):
rpc_deviceimage = objects.device_image.get_by_uuid(
pecan.request.context, deviceimage_uuid)
return DeviceImage.convert_with_links(rpc_deviceimage)
@wsme_pecan.wsexpose(DeviceImageCollection,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of device images."""
return self._get_device_image_collection(marker, limit,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(DeviceImage, wtypes.text)
def get_one(self, deviceimage_uuid):
"""Retrieve a single device image."""
return self._get_one(deviceimage_uuid)
@expose('json')
@cutils.synchronized(LOCK_NAME)
def post(self):
"""Create a new device image."""
fileitem = pecan.request.POST['file']
if not fileitem.filename:
return dict(success="", error="Error: No file uploaded")
try:
file_content = fileitem.file.read()
except Exception as e:
return dict(
success="",
error=("No bitstream file has been added, "
"invalid file: %s" % e))
field_list = ['uuid', 'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id',
'name', 'description', 'image_version']
data = dict((k, v) for (k, v) in pecan.request.POST.items()
if k in field_list and not (v is None))
msg = _validate_syntax(data)
if msg:
return dict(success="", error=msg)
device_image = pecan.request.dbapi.deviceimage_create(data)
device_image_dict = device_image.as_dict()
# Save the file contents in a temporary location
filename = cutils.format_image_filename(device_image)
image_file_path = os.path.join(dconstants.DEVICE_IMAGE_TMP_PATH, filename)
if not os.path.exists(dconstants.DEVICE_IMAGE_TMP_PATH):
os.makedirs(dconstants.DEVICE_IMAGE_TMP_PATH)
with os.fdopen(os.open(image_file_path,
os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_DEFAULT),
'wb') as f:
f.write(file_content)
# Call rpc to move the bitstream file to the final destination
pecan.request.rpcapi.store_bitstream_file(pecan.request.context, filename)
return dict(success="", error="", device_image=device_image_dict)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, deviceimage_uuid):
"""Delete a device image."""
# TODO: only allow delete if there are no devices using the image
device_image = objects.device_image.get_by_uuid(
pecan.request.context, deviceimage_uuid)
filename = cutils.format_image_filename(device_image)
pecan.request.rpcapi.delete_bitstream_file(pecan.request.context,
filename)
pecan.request.dbapi.deviceimage_destroy(deviceimage_uuid)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(DeviceImage, types.uuid, wtypes.text, body=types.apidict)
def patch(self, uuid, action, body):
"""Apply/Remove a device image to/from host ."""
if action not in [dconstants.APPLY_ACTION, dconstants.REMOVE_ACTION]:
raise exception.OperationNotPermitted
try:
device_image = objects.device_image.get_by_uuid(
pecan.request.context, uuid)
except exception.DeviceImageNotFound:
LOG.error("Device image %s deos not exist." % uuid)
raise wsme.exc.ClientSideError(_(
"Device image {} failed: image does not exist".format(action)))
# For now, update status in fpga_device
# find device label with matching label key and value
for key, value in body.items():
device_labels = pecan.request.dbapi.device_label_get_by_label(
key, value)
if not device_labels:
raise wsme.exc.ClientSideError(_(
"Device image {} failed: label {}={} does not exist".format(
action, key, value)))
break
for device_label in device_labels:
if action == dconstants.APPLY_ACTION:
process_device_image_apply(device_label.pcidevice_id,
device_image, device_label.id)
# Create an entry of image to label mapping
pecan.request.dbapi.device_image_label_create({
'image_id': device_image.id,
'label_id': device_label.id,
})
update_device_image_state(device_label.host_id,
device_label.pcidevice_id,
device_image.id, dconstants.DEVICE_IMAGE_UPDATE_PENDING)
# Update flags in pci_device and host
modify_flags(device_label.pcidevice_id, device_label.host_id)
elif action == dconstants.REMOVE_ACTION:
try:
img_lbl = pecan.request.dbapi.device_image_label_get_by_image_label(
device_image.id, device_label.id)
if img_lbl:
pecan.request.dbapi.device_image_label_destroy(img_lbl.id)
except exception.DeviceImageLabelNotFoundByKey:
raise wsme.exc.ClientSideError(_(
"Device image {} not associated with label {}={}".format(
device_image.uuid, device_label.label_key,
device_label.label_value
)))
delete_device_image_state(device_label.pcidevice_id, device_image)
if not body:
# No host device labels specified, apply to all hosts
LOG.info("No host device labels specified")
hosts = pecan.request.dbapi.ihost_get_list()
for host in hosts:
fpga_devices = pecan.request.dbapi.fpga_device_get_by_host(host.id)
for dev in fpga_devices:
if action == dconstants.APPLY_ACTION:
process_device_image_apply(dev.pci_id, device_image)
update_device_image_state(host.id,
dev.pci_id, device_image.id,
dconstants.DEVICE_IMAGE_UPDATE_PENDING)
# Update flags in pci_device and host
modify_flags(dev.pci_id, dev.host_id)
elif action == dconstants.REMOVE_ACTION:
delete_device_image_state(dev.pci_id, device_image)
return DeviceImage.convert_with_links(device_image)
def _validate_bitstream_type(dev_img):
msg = None
if dev_img['bitstream_type'] not in ALLOWED_BITSTREAM_TYPES:
msg = _("Bitstream type %s not supported" % dev_img['bitstream_type'])
elif (dev_img['bitstream_type'] == dconstants.BITSTREAM_TYPE_FUNCTIONAL and
'bitstream_id' not in dev_img):
msg = _("bitstream_id is required for functional bitstream type")
elif (dev_img['bitstream_type'] == dconstants.BITSTREAM_TYPE_ROOT_KEY and
'key_signature' not in dev_img):
msg = _("key_signature is required for root key bitstream type")
elif (dev_img['bitstream_type'] == dconstants.BITSTREAM_TYPE_KEY_REVOCATION and
'revoke_key_id' not in dev_img):
msg = _("revoke_key_id is required for key revocation bitstream type")
return msg
def _is_hex_string(s):
try:
int(s, 16)
return True
except ValueError:
return False
def _validate_hexadecimal_fields(dev_img):
msg = None
if ('pci_vendor' in dev_img.keys() and
not _is_hex_string(dev_img['pci_vendor'])):
msg = _("pci_vendor must be hexadecimal")
elif ('pci_device' in dev_img.keys() and
not _is_hex_string(dev_img['pci_device'])):
msg = _("pci_device must be hexadecimal")
elif ('bitstream_id' in dev_img.keys() and
not _is_hex_string(dev_img['bitstream_id'])):
msg = _("bitstream_id must be hexadecimal")
elif ('key_signature' in dev_img.keys() and
not _is_hex_string(dev_img['key_signature'])):
msg = _("key_signature must be hexadecimal")
return msg
def _check_revoke_key(dev_img):
msg = None
if ('revoke_key_id' in dev_img.keys()):
if str(dev_img['revoke_key_id']).isdigit():
dev_img['revoke_key_id'] = int(dev_img['revoke_key_id'])
else:
msg = _("revoke_key_id must be an integer")
return msg
def _validate_syntax(device_image):
"""
Validates the syntax of each field.
"""
msg = _validate_hexadecimal_fields(device_image)
if not msg:
msg = _validate_bitstream_type(device_image)
if not msg:
msg = _check_revoke_key(device_image)
return msg
def update_device_image_state(host_id, pcidevice_id, image_id, status):
try:
dev_img_state = pecan.request.dbapi.device_image_state_get_by_image_device(
image_id, pcidevice_id)
pecan.request.dbapi.device_image_state_update(dev_img_state.id,
{'status': status})
except exception.DeviceImageStateNotFoundByKey:
# Create an entry of image to device mapping
state_values = {
'host_id': host_id,
'pcidevice_id': pcidevice_id,
'image_id': image_id,
'status': status,
}
pecan.request.dbapi.device_image_state_create(state_values)
def process_device_image_apply(pcidevice_id, device_image, label_id=None):
pci_device = pecan.request.dbapi.pci_device_get(pcidevice_id)
host = pecan.request.dbapi.ihost_get(pci_device.host_uuid)
# check if device image with type functional or root-key already applied
# to the device
records = pecan.request.dbapi.device_image_state_get_all(
host_id=host.id, pcidevice_id=pcidevice_id)
for r in records:
img = pecan.request.dbapi.deviceimage_get(r.image_id)
if img.bitstream_type == device_image.bitstream_type:
if img.bitstream_type == dconstants.BITSTREAM_TYPE_ROOT_KEY:
# Block applying root-key image if another one is already applied
msg = _("Root-key image {} is already applied to host {} device"
" {}".format(img.uuid, host.hostname, pci_device.pciaddr))
raise wsme.exc.ClientSideError(msg)
elif img.bitstream_type == dconstants.BITSTREAM_TYPE_FUNCTIONAL:
if r.status == dconstants.DEVICE_IMAGE_UPDATE_IN_PROGRESS:
msg = _("Applying image {} for host {} device {} not allowed "
"while device image update is in progress".format(
device_image.uuid, host.hostname, pci_device.pciaddr))
raise wsme.exc.ClientSideError(msg)
# Remove the existing device_image_state record
pecan.request.dbapi.device_image_state_destroy(r.uuid)
# Remove the existing device image label if any
if label_id:
try:
img_lbl = pecan.request.dbapi.device_image_label_get_by_image_label(
img.id, label_id)
pecan.request.dbapi.device_image_label_destroy(img_lbl.uuid)
except exception.DeviceImageLabelNotFoundByKey:
pass
def delete_device_image_state(pcidevice_id, device_image):
try:
dev_img = pecan.request.dbapi.device_image_state_get_by_image_device(
device_image.id, pcidevice_id)
pecan.request.dbapi.device_image_state_destroy(dev_img.uuid)
except exception.DeviceImageStateNotFoundByKey:
pass
def modify_flags(pcidevice_id, host_id):
# Set flag for pci_device indicating device requires image update
pecan.request.dbapi.pci_device_update(pcidevice_id,
{'needs_firmware_update': True},
host_id)
# Set flag for host indicating device image update is pending if it is
# not already in progress
host = pecan.request.dbapi.ihost_get(host_id)
if host.device_image_update != dconstants.DEVICE_IMAGE_UPDATE_IN_PROGRESS:
pecan.request.dbapi.ihost_update(host_id,
{'device_image_update': dconstants.DEVICE_IMAGE_UPDATE_PENDING})
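A sketch of the validation chain used by post() above, with an assumed payload; _validate_syntax returns None on success or an error string:

# Hypothetical functional-image payload passing all three validators:
# hexadecimal fields, bitstream-type requirements, and revoke-key check.
data = {'bitstream_type': 'functional',
        'pci_vendor': '8086',
        'pci_device': '0b30',
        'bitstream_id': '1234'}
assert _validate_syntax(data) is None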

View File

@ -0,0 +1,152 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from oslo_log import log
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv import objects
LOG = log.getLogger(__name__)
class DeviceImageState(base.APIBase):
"""API representation of a device_image_state.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
a device image state.
"""
id = int
"Unique ID for this device_image_state"
uuid = types.uuid
"Unique UUID for this device_image_state"
host_id = int
"Represent the host id of the host that the pci_device belongs to"
host_uuid = types.uuid
"Represent the UUID of the host that the pci_device belongs to"
pcidevice_id = int
"Represent the id of pci_device"
pcidevice_uuid = types.uuid
"Represent the uuid of pci_device"
image_id = int
"Represent the id of device image"
image_uuid = types.uuid
"Represent the uuid of device image"
status = wtypes.text
"Firmware update status"
update_start_time = wtypes.datetime.datetime
"Represents the start time of the device image update"
updated_at = wtypes.datetime.datetime
"The time at which the record is updated "
links = [link.Link]
"A list containing a self link and associated device image state links"
def __init__(self, **kwargs):
self.fields = list(objects.device_image_state.fields.keys())
for k in self.fields:
setattr(self, k, kwargs.get(k))
@classmethod
def convert_with_links(cls, rpc_device_image_state, expand=True):
device_image_state = DeviceImageState(**rpc_device_image_state.as_dict())
if not expand:
device_image_state.unset_fields_except(
['id', 'uuid', 'host_id', 'host_uuid',
'pcidevice_id', 'pcidevice_uuid',
'image_id', 'image_uuid', 'status',
'update_start_time', 'updated_at'])
# do not expose the id attribute
device_image_state.host_id = wtypes.Unset
return device_image_state
class DeviceImageStateCollection(collection.Collection):
"""API representation of a collection of device_image_state."""
device_image_state = [DeviceImageState]
"A list containing device_image_state objects"
def __init__(self, **kwargs):
self._type = 'device_image_state'
@classmethod
def convert_with_links(cls, rpc_device_image_state, limit, url=None,
expand=False, **kwargs):
collection = DeviceImageStateCollection()
collection.device_image_state = [DeviceImageState.convert_with_links(p, expand)
for p in rpc_device_image_state]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'DeviceImageStateController'
class DeviceImageStateController(rest.RestController):
"""REST controller for device image state."""
def __init__(self, parent=None, **kwargs):
self._parent = parent
def _get_device_image_state_collection(
self, marker=None, limit=None, sort_key=None,
sort_dir=None, expand=False, resource_url=None):
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.device_image_state.get_by_uuid(
pecan.request.context,
marker)
states = pecan.request.dbapi.device_image_state_get_list(
limit=limit, marker=marker_obj,
sort_key=sort_key, sort_dir=sort_dir)
return DeviceImageStateCollection.convert_with_links(
states, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
def _get_one(self, uuid):
obj = objects.device_image_state.get_by_uuid(
pecan.request.context, uuid)
return DeviceImageState.convert_with_links(obj)
@wsme_pecan.wsexpose(DeviceImageStateCollection,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of device image state."""
return self._get_device_image_state_collection(marker, limit,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(DeviceImageState, wtypes.text)
def get_one(self, deviceimagestate_uuid):
"""Retrieve a single device image state."""
return self._get_one(deviceimagestate_uuid)
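For reference, a hedged client-side sketch of these read-only endpoints (the sysinv URL, token, and the /device_image_state mount point are assumptions, not part of this change):

    import requests

    SYSINV_URL = 'http://192.168.204.1:6385/v1'       # illustrative endpoint
    HEADERS = {'X-Auth-Token': 'hypothetical-token'}  # assumes keystone auth

    # get_all(): list device image states, lowest id first.
    resp = requests.get(SYSINV_URL + '/device_image_state',
                        params={'sort_key': 'id', 'sort_dir': 'asc'},
                        headers=HEADERS)
    for state in resp.json().get('device_image_state', []):
        print(state['pcidevice_uuid'], state['status'])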

View File

@ -0,0 +1,244 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from oslo_log import log
from sysinv._i18n import _
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
LOG = log.getLogger(__name__)
class DeviceLabelPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return []
class DeviceLabel(base.APIBase):
"""API representation of a device label.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
a device label.
"""
id = int
"Unique ID for this device label"
uuid = types.uuid
"Unique UUID for this device label"
host_id = int
"Represent the id of host the device label belongs to"
host_uuid = types.uuid
"Represent the uuid of the host the device label belongs to"
pcidevice_id = int
"Represent the id of pci_device the device label belongs to"
pcidevice_uuid = types.uuid
"Represent the uuid of the pci_device the device label belongs to"
label_key = wtypes.text
"Represents a label key assigned to the device"
label_value = wtypes.text
"Represents a label value assigned to the device"
def __init__(self, **kwargs):
self.fields = list(objects.device_label.fields.keys())
for k in self.fields:
setattr(self, k, kwargs.get(k))
# API-only attribute
self.fields.append('action')
setattr(self, 'action', kwargs.get('action', None))
@classmethod
def convert_with_links(cls, rpc_device_label, expand=True):
device_label = DeviceLabel(**rpc_device_label.as_dict())
if not expand:
device_label.unset_fields_except(
['uuid', 'host_id', 'host_uuid', 'pcidevice_id', 'pcidevice_uuid',
'label_key', 'label_value'])
# do not expose the id attribute
device_label.host_id = wtypes.Unset
device_label.pcidevice_id = wtypes.Unset
return device_label
class DeviceLabelCollection(collection.Collection):
"""API representation of a collection of device label."""
device_labels = [DeviceLabel]
"A list containing device_label objects"
def __init__(self, **kwargs):
self._type = 'device_labels'
@classmethod
def convert_with_links(cls, rpc_device_labels, limit, url=None,
expand=False, **kwargs):
collection = DeviceLabelCollection()
collection.device_labels = [DeviceLabel.convert_with_links(p, expand)
for p in rpc_device_labels]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
LOCK_NAME = 'DeviceLabelController'
class DeviceLabelController(rest.RestController):
"""REST controller for device label."""
def __init__(self, parent=None, **kwargs):
self._parent = parent
def _get_device_label_collection(
self, device_uuid, marker=None, limit=None, sort_key=None,
sort_dir=None, expand=False, resource_url=None):
if self._parent and not device_uuid:
raise exception.InvalidParameterValue(_(
"Device id not specified."))
limit = utils.validate_limit(limit)
sort_dir = utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.device_label.get_by_uuid(
pecan.request.context,
marker)
if device_uuid:
device_labels = pecan.request.dbapi.device_label_get_by_device(
device_uuid, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
else:
device_labels = pecan.request.dbapi.device_label_get_list(
limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return DeviceLabelCollection.convert_with_links(
device_labels, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
def _get_one(self, device_label_uuid):
rpc_device_label = objects.device_label.get_by_uuid(
pecan.request.context, device_label_uuid)
return DeviceLabel.convert_with_links(rpc_device_label)
@wsme_pecan.wsexpose(DeviceLabelCollection, types.uuid, types.uuid,
int, wtypes.text, wtypes.text)
def get_all(self, uuid=None, marker=None, limit=None,
sort_key='id', sort_dir='asc'):
"""Retrieve a list of device labels."""
return self._get_device_label_collection(uuid, marker, limit,
sort_key=sort_key,
sort_dir=sort_dir)
@wsme_pecan.wsexpose(DeviceLabel, types.uuid)
def get_one(self, device_label_uuid):
"""Retrieve a single device label."""
try:
sp_label = objects.device_label.get_by_uuid(
pecan.request.context,
device_label_uuid)
except exception.InvalidParameterValue:
raise wsme.exc.ClientSideError(
_("No device label found for %s" % device_label_uuid))
return DeviceLabel.convert_with_links(sp_label)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(DeviceLabelCollection, types.boolean,
body=types.apidict)
def post(self, overwrite=False, body=None):
"""Assign a new device label."""
pcidevice_uuid = body['pcidevice_uuid']
del body['pcidevice_uuid']
pcidevice = objects.pci_device.get_by_uuid(pecan.request.context,
pcidevice_uuid)
fpgadevice = pecan.request.dbapi.fpga_device_get(pcidevice.pciaddr,
pcidevice.host_id)
existing_labels = {}
for label_key in body.keys():
label = None
try:
label = pecan.request.dbapi.device_label_query(
pcidevice.id, label_key)
except exception.DeviceLabelNotFoundByKey:
pass
if label:
if overwrite:
existing_labels.update({label_key: label.uuid})
else:
raise wsme.exc.ClientSideError(_(
"Label %s exists for device %s. Use overwrite option"
" to assign a new value." %
(label_key, pcidevice.name)))
new_records = []
for key, value in body.items():
values = {
'host_id': pcidevice.host_id,
'pcidevice_id': pcidevice.id,
'fpgadevice_id': fpgadevice.id,
'label_key': key,
'label_value': value
}
try:
if existing_labels.get(key, None):
# Update the value
label_uuid = existing_labels.get(key)
new_label = pecan.request.dbapi.device_label_update(
label_uuid, {'label_value': value})
else:
new_label = pecan.request.dbapi.device_label_create(
pcidevice_uuid, values)
new_records.append(new_label)
except exception.DeviceLabelAlreadyExists:
# We should not be here
raise wsme.exc.ClientSideError(_(
"Error creating label %s") % label_key)
return DeviceLabelCollection.convert_with_links(
new_records, limit=None, url=None, expand=False,
sort_key='id', sort_dir='asc')
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, device_label_uuid):
"""Delete a device label."""
pecan.request.dbapi.device_label_destroy(device_label_uuid)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(DeviceLabel, body=DeviceLabel)
def patch(self, device_label):
"""Modify a new device label."""
raise exception.OperationNotPermitted
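A hedged usage sketch for the POST handler above (the URL, the /device_labels mount point, the token, and the UUID are illustrative assumptions):

    import requests

    SYSINV_URL = 'http://192.168.204.1:6385/v1'       # illustrative endpoint
    HEADERS = {'X-Auth-Token': 'hypothetical-token'}  # assumes keystone auth

    # Body: pcidevice_uuid plus one key/value pair per label to assign.
    body = {
        'pcidevice_uuid': '11111111-2222-3333-4444-555555555555',  # hypothetical
        'key1': 'value1',
    }
    # overwrite=true updates an existing label instead of rejecting the request.
    requests.post(SYSINV_URL + '/device_labels',
                  params={'overwrite': 'true'}, json=body, headers=HEADERS)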

View File

@ -88,6 +88,7 @@ from sysinv.api.controllers.v1 import patch_api
from sysinv.common import ceph
from sysinv.common import constants
from sysinv.common import device
from sysinv.common import exception
from sysinv.common import kubernetes
from sysinv.common import utils as cutils
@ -515,6 +516,12 @@ class Host(base.APIBase):
iscsi_initiator_name = wtypes.text
"The iscsi initiator name (only used for worker hosts)"
device_image_update = wtypes.text
"Represent the status of device image update of this ihost."
reboot_needed = types.boolean
"Represent whether a reboot is needed after device image update"
def __init__(self, **kwargs):
self.fields = list(objects.host.fields.keys())
for k in self.fields:
@ -1088,6 +1095,8 @@ class HostController(rest.RestController):
'wipe_osds': ['GET'],
'kube_upgrade_control_plane': ['POST'],
'kube_upgrade_kubelet': ['POST'],
'device_image_update': ['POST'],
'device_image_update_abort': ['POST'],
}
def __init__(self, from_isystem=False):
@ -2670,7 +2679,8 @@ class HostController(rest.RestController):
# Set upgrade flag so controller-1 will upgrade after install
# This flag is guaranteed to be written on controller-0, since
# controller-1 must be locked to run the host-upgrade command.
# perform rpc to conductor to do the update with root privilege access
pecan.request.rpcapi.update_controller_upgrade_flag(pecan.request.context)
return Host.convert_with_links(rpc_ihost)
@ -5391,6 +5401,28 @@ class HostController(rest.RestController):
raise wsme.exc.ClientSideError(
_("%s" % response['error_details']))
self._check_lock_controller_during_upgrade(hostupdate.ihost_orig['hostname'])
@staticmethod
def _check_lock_controller_during_upgrade(hostname):
# Check to ensure in valid upgrade state for host-lock
try:
upgrade = pecan.request.dbapi.software_upgrade_get_one()
except exception.NotFound:
# No upgrade in progress
return
if (upgrade.state in [constants.UPGRADE_STARTING] and
hostname == constants.CONTROLLER_1_HOSTNAME):
# Lock of controller-1 is not allowed during
# the UPGRADE_STARTING state
raise wsme.exc.ClientSideError(
_("host-lock %s is not allowed during upgrade state '%s'. "
"Upgrade state must be '%s'.") %
(hostname,
constants.UPGRADE_STARTING,
constants.UPGRADE_STARTED))
def check_unlock_application(self, hostupdate, force_unlock=False):
LOG.info("%s ihost check_unlock_application" % hostupdate.displayid)
apps = pecan.request.dbapi.kube_app_get_all()
@ -6819,6 +6851,45 @@ class HostController(rest.RestController):
host_obj.hostname)
return Host.convert_with_links(host_obj)
# POST ihosts/<uuid>/device_image_update
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(Host, types.uuid)
def device_image_update(self, host_uuid):
""" Update device image on the specified host.
:param host_uuid: UUID of the host
"""
LOG.info("device_image_update host_uuid=%s " % host_uuid)
host_obj = objects.host.get_by_uuid(pecan.request.context, host_uuid)
# Set the flag indicating the host is in progress of
# updating device image
host_obj = pecan.request.dbapi.ihost_update(host_uuid,
{'device_image_update': device.DEVICE_IMAGE_UPDATE_IN_PROGRESS})
# Call rpcapi to tell conductor to begin device image update
pecan.request.rpcapi.host_device_image_update(
pecan.request.context, host_uuid)
return Host.convert_with_links(host_obj)
# POST ihosts/<uuid>/device_image_update_abort
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(Host, types.uuid)
def device_image_update_abort(self, host_uuid):
""" Abort device image update on the specified host.
:param host_uuid: UUID of the host
"""
LOG.info("device_image_update_abort host_uuid=%s " % host_uuid)
host_obj = objects.host.get_by_uuid(pecan.request.context, host_uuid)
# Set the flag indicating the host is no longer updating the device
# image
pecan.request.dbapi.ihost_update(host_uuid,
{'device_image_update': device.DEVICE_IMAGE_UPDATE_PENDING})
# Call rpcapi to tell conductor to abort device image update
pecan.request.rpcapi.host_device_image_update_abort(
pecan.request.context, host_uuid)
return Host.convert_with_links(host_obj)
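The two actions map onto plain POSTs against the host resource, as the route comments above state; a hedged sketch (endpoint, token, and UUID are illustrative):

    import requests

    SYSINV_URL = 'http://192.168.204.1:6385/v1'       # illustrative endpoint
    HEADERS = {'X-Auth-Token': 'hypothetical-token'}  # assumes keystone auth
    HOST_UUID = '11111111-2222-3333-4444-555555555555'  # hypothetical

    # Start the update; the conductor performs the actual device flashing.
    requests.post('{}/ihosts/{}/device_image_update'.format(SYSINV_URL, HOST_UUID),
                  headers=HEADERS)
    # Abort: flips the host back to 'pending' and notifies the conductor.
    requests.post('{}/ihosts/{}/device_image_update_abort'.format(SYSINV_URL, HOST_UUID),
                  headers=HEADERS)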
def _create_node(host, xml_node, personality, is_dynamic_ip):
host_node = et.SubElement(xml_node, 'host')

View File

@ -332,11 +332,161 @@ class HostFsController(rest.RestController):
@wsme_pecan.wsexpose(None, types.uuid, status_code=204)
def delete(self, host_fs_uuid):
"""Delete a host filesystem."""
host_fs = objects.host_fs.get_by_uuid(pecan.request.context,
host_fs_uuid).as_dict()
ihost_uuid = host_fs['ihost_uuid']
host = pecan.request.dbapi.ihost_get(ihost_uuid)
_delete(host_fs)
try:
# Host must be available to add/remove fs at runtime
if host.availability in [constants.AVAILABILITY_AVAILABLE,
constants.AVAILABILITY_DEGRADED]:
# perform rpc to conductor to perform config apply
pecan.request.rpcapi.update_host_filesystem_config(
pecan.request.context,
host=host,
filesystem_list=[host_fs['name']],)
except Exception as e:
msg = _("Failed to delete filesystem %s" % host_fs['name'])
LOG.error("%s with exception %s" % (msg, e))
pecan.request.dbapi.host_fs_create(host.id, host_fs)
raise wsme.exc.ClientSideError(msg)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(HostFs, body=HostFs)
def post(self, host_fs):
"""Create a host filesystem."""
try:
host_fs = host_fs.as_dict()
host_fs = _create(host_fs)
ihost_uuid = host_fs['ihost_uuid']
ihost_uuid.strip()
host = pecan.request.dbapi.ihost_get(ihost_uuid)
except exception.SysinvException as e:
LOG.exception(e)
raise wsme.exc.ClientSideError(_("Invalid data: failed to create a"
" filesystem"))
try:
# Host must be available to add/remove fs at runtime
if host.availability in [constants.AVAILABILITY_AVAILABLE,
constants.AVAILABILITY_DEGRADED]:
# perform rpc to conductor to perform config apply
pecan.request.rpcapi.update_host_filesystem_config(
pecan.request.context,
host=host,
filesystem_list=[host_fs['name']],)
except Exception as e:
msg = _("Failed to add filesystem name for %s" % host.hostname)
LOG.error("%s with exception %s" % (msg, e))
pecan.request.dbapi.host_fs_destroy(host_fs['id'])
raise wsme.exc.ClientSideError(msg)
return HostFs.convert_with_links(host_fs)
def _check_host_fs(host_fs):
"""Check host state"""
if host_fs['name'] not in constants.FS_CREATION_ALLOWED:
raise wsme.exc.ClientSideError(
_("Unsupported filesystem. Only the following filesystems are supported\
for creation or deletion: %s" % str(constants.FS_CREATION_ALLOWED)))
ihost_uuid = host_fs['ihost_uuid']
ihost_uuid.strip()
try:
ihost = pecan.request.dbapi.ihost_get(ihost_uuid)
except exception.ServerNotFound:
raise wsme.exc.ClientSideError(_("Invalid ihost_uuid %s"
% ihost_uuid))
if ihost.personality != constants.CONTROLLER:
raise wsme.exc.ClientSideError(_("Filesystem can only be added "
"on controller nodes"))
# Host must be online/available/degraded to add/remove
# any filesystem specified in FS_CREATION_ALLOWED
if ihost.availability not in [constants.AVAILABILITY_AVAILABLE,
constants.AVAILABILITY_ONLINE,
constants.AVAILABILITY_DEGRADED]:
raise wsme.exc.ClientSideError(_("Filesystem can only be added when "
"controller node is in available/online/degraded"))
def _create(host_fs):
"""Create a host filesystem"""
_check_host_fs(host_fs)
ihost_uuid = host_fs['ihost_uuid']
ihost_uuid.strip()
ihost = pecan.request.dbapi.ihost_get(ihost_uuid)
# See if this filesystem name already exists
current_host_fs_list = pecan.request.dbapi.host_fs_get_by_ihost(ihost_uuid)
for fs in current_host_fs_list:
if fs['name'] == host_fs['name']:
raise wsme.exc.ClientSideError(_("Filesystem name (%s) "
"already present" %
fs['name']))
requested_growth_gib = int(float(host_fs['size']))
LOG.info("Requested growth in GiB: %s for fs %s on host %s" %
(requested_growth_gib, host_fs['name'], ihost_uuid))
cgtsvg_free_space_gib = utils.get_node_cgtsvg_limit(ihost)
if requested_growth_gib > cgtsvg_free_space_gib:
msg = _("HostFs update failed: Not enough free space on %s. "
"Current free space %s GiB, "
"requested total increase %s GiB" %
(constants.LVG_CGTS_VG, cgtsvg_free_space_gib, requested_growth_gib))
LOG.warning(msg)
raise wsme.exc.ClientSideError(msg)
data = {
'name': constants.FILESYSTEM_NAME_IMAGE_CONVERSION,
'size': host_fs['size'],
'logical_volume': constants.FILESYSTEM_LV_DICT[
constants.FILESYSTEM_NAME_IMAGE_CONVERSION]
}
forihostid = ihost['id']
host_fs = pecan.request.dbapi.host_fs_create(forihostid, data)
return host_fs
def _delete(host_fs):
"""Delete a host filesystem."""
_check_host_fs(host_fs)
if host_fs['name'] == constants.FILESYSTEM_NAME_IMAGE_CONVERSION:
try:
app = pecan.request.dbapi.kube_app_get(constants.HELM_APP_OPENSTACK)
if app.status != constants.APP_UPLOAD_SUCCESS:
raise wsme.exc.ClientSideError(_("Deleting filesystem %s is not allowed "
"when stx-openstack is in %s state" %
(host_fs['name'], app.status)))
except exception.KubeAppNotFound:
LOG.info("Application %s not found, deleting %s fs" %
constants.HELM_APP_OPENSTACK, host_fs['name'])
ihost = pecan.request.dbapi.ihost_get(host_fs['forihostid'])
try:
pecan.request.dbapi.host_fs_destroy(host_fs['id'])
except exception.HTTPNotFound:
msg = _("Deleting Filesystem failed: host %s filesystem %s"
% (ihost.hostname, host_fs['name']))
raise wsme.exc.ClientSideError(msg)
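Putting the create path together, a hedged request sketch (URL, the /host_fs mount point, token, and UUID are assumptions; only names in FS_CREATION_ALLOWED pass _check_host_fs):

    import requests

    SYSINV_URL = 'http://192.168.204.1:6385/v1'       # illustrative endpoint
    HEADERS = {'X-Auth-Token': 'hypothetical-token'}  # assumes keystone auth

    body = {
        'name': 'image-conversion',   # the only creatable filesystem here
        'size': 20,                   # GiB; must fit within cgtsvg free space
        'ihost_uuid': '11111111-2222-3333-4444-555555555555',  # hypothetical
    }
    requests.post(SYSINV_URL + '/host_fs', json=body, headers=HEADERS)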

View File

@ -192,7 +192,7 @@ class InterfaceNetworkController(rest.RestController):
ethernet_port_mac = tmp_interface['imac']
_update_host_mgmt_mac(host, ethernet_port_mac)
cutils.perform_distributed_cloud_config(pecan.request.dbapi,
interface_id)
return InterfaceNetwork.convert_with_links(result)

View File

@ -496,9 +496,19 @@ class KubeAppController(rest.RestController):
version = body.get('app_version', '')
name, version, mname, mfile = self._check_tarfile(tarfile, name, version,
constants.APP_UPDATE_OP)
reuse_overrides_flag = body.get('reuse_user_overrides', None)
if reuse_overrides_flag is None:
# None means let the application decide
reuse_overrides = None
elif reuse_overrides_flag in ['true', 'True']:
reuse_overrides = True
elif reuse_overrides_flag in ['false', 'False']:
reuse_overrides = False
else:
raise wsme.exc.ClientSideError(_(
"Application-update rejected: "
"invalid reuse_user_overrides setting."))
try:
applied_app = objects.kube_app.get_by_name(pecan.request.context, name)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2015-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -18,6 +18,7 @@ from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import device as dconstants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
@ -103,6 +104,30 @@ class PCIDevice(base.APIBase):
enabled = types.boolean
"Represent the enabled status of the device"
bmc_build_version = wtypes.text
"Represent the BMC build version of the fpga device"
bmc_fw_version = wtypes.text
"Represent the BMC firmware version of the fpga device"
root_key = wtypes.text
"Represent the root key of the fpga device"
revoked_key_ids = wtypes.text
"Represent the key revocation ids of the fpga device"
boot_page = wtypes.text
"Represent the boot page of the fpga device"
bitstream_id = wtypes.text
"Represent the bitstream id of the fpga device"
needs_firmware_update = types.boolean
"Represent whether firmware update is required for the fpga device"
status = wtypes.text
"Represent the status of the fpga device"
links = [link.Link]
"Represent a list containing a self link and associated device links"
@ -123,12 +148,25 @@ class PCIDevice(base.APIBase):
'sriov_totalvfs', 'sriov_numvfs',
'sriov_vfs_pci_address', 'driver',
'host_uuid', 'enabled',
'bmc_build_version', 'bmc_fw_version',
'root_key', 'revoked_key_ids',
'boot_page', 'bitstream_id',
'needs_firmware_update', 'status',
'created_at', 'updated_at'])
# do not expose the id attribute
device.host_id = wtypes.Unset
device.node_id = wtypes.Unset
# if not FPGA device, hide these attributes
if device.pclass_id != dconstants.PCI_DEVICE_CLASS_FPGA:
device.bmc_build_version = wtypes.Unset
device.bmc_fw_version = wtypes.Unset
device.root_key = wtypes.Unset
device.revoked_key_ids = wtypes.Unset
device.boot_page = wtypes.Unset
device.bitstream_id = wtypes.Unset
device.links = [link.Link.make_link('self', pecan.request.host_url,
'pci_devices', device.uuid),
link.Link.make_link('bookmark',
@ -241,6 +279,7 @@ class PCIDeviceController(rest.RestController):
rpc_device = objects.pci_device.get_by_uuid(
pecan.request.context, device_uuid)
return PCIDevice.convert_with_links(rpc_device)
@cutils.synchronized(LOCK_NAME)

View File

@ -44,7 +44,8 @@ SYSINV_ROUTE_MAX_PATHS = 4
# Defines the list of interface network types that support routes
ALLOWED_NETWORK_TYPES = [constants.NETWORK_TYPE_DATA,
constants.NETWORK_TYPE_MGMT,
constants.NETWORK_TYPE_CLUSTER_HOST,
constants.NETWORK_TYPE_STORAGE]
class Route(base.APIBase):

View File

@ -215,6 +215,8 @@ class AuditLogging(hooks.PecanHook):
url_path = urlparse(state.request.path_qs).path
def json_post_data(rest_state):
if 'form-data' in rest_state.request.headers.get('Content-Type'):
return " POST: {}".format(rest_state.request.params)
if not hasattr(rest_state.request, 'json'):
return ""
return " POST: {}".format(rest_state.request.json)

View File

@ -27,6 +27,25 @@ def create_host_overrides(filename):
dbapi = api.get_instance()
data = {}
# Get the distributed cloud role info
system = dbapi.isystem_get_one()
if system.distributed_cloud_role:
data.update({'distributed_cloud_role': system.distributed_cloud_role})
else:
data.update({'distributed_cloud_role': 'none'})
# region_config and region_name are overridden for subclouds
if (system.distributed_cloud_role and
system.distributed_cloud_role == constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD):
data.update({'region_config': True})
data.update({'region_name': system.region_name})
data.update({'system_mode': system.system_mode})
if system.location:
data.update({'location': system.location})
if system.description:
data.update({'description': system.description})
# Get the DNS info
dns = dbapi.idns_get_one()
if dns.nameservers:
@ -107,6 +126,18 @@ def create_host_overrides(filename):
}
data.update(pool_data)
elif pool.name == 'system-controller-subnet':
pool_data = {'system_controller_subnet': subnet,
'system_controller_floating_address': pool.floating_address,
}
data.update(pool_data)
elif pool.name == 'system-controller-oam-subnet':
pool_data = {'system_controller_oam_subnet': subnet,
'system_controller_oam_floating_address': pool.floating_address,
}
data.update(pool_data)
docker_list = dbapi.service_parameter_get_all(service=constants.SERVICE_TYPE_DOCKER,
section=constants.SERVICE_PARAM_SECTION_DOCKER_PROXY)
for docker in docker_list:

View File

@ -38,7 +38,7 @@ class CephApiOperator(object):
def __init__(self):
self._ceph_api = ceph.CephWrapper(
endpoint='http://localhost:{}'.format(constants.CEPH_MGR_PORT))
self._default_tier = constants.SB_TIER_DEFAULT_NAMES[
constants.SB_TIER_TYPE_CEPH]

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -303,7 +303,7 @@ DEFAULT_SMALL_BACKUP_STOR_SIZE = 20
DEFAULT_VIRTUAL_DATABASE_STOR_SIZE = 5
DEFAULT_VIRTUAL_BACKUP_STOR_SIZE = 5
DEFAULT_EXTENSION_STOR_SIZE = 1
DEFAULT_DC_VAULT_STOR_SIZE = 15
DEFAULT_ETCD_STORE_SIZE = 1
BACKUP_OVERHEAD = 5
@ -333,6 +333,7 @@ DEFAULT_SMALL_DISK_SIZE = 240
MINIMUM_DISK_SIZE = 154
KUBERNETES_DOCKER_STOR_SIZE = 30
IMAGE_CONVERSION_SIZE = 1
DOCKER_DISTRIBUTION_STOR_SIZE = 16
ETCD_STOR_SIZE = 5
KUBELET_STOR_SIZE = 10
@ -507,7 +508,7 @@ CONTROLLER_FS_AVAILABLE = 'available'
DRBD_PLATFORM = 'platform'
DRBD_PGSQL = 'pgsql'
DRBD_EXTENSION = 'extension'
DRBD_DC_VAULT = 'dc-vault'
DRBD_ETCD = 'etcd'
DRBD_DOCKER_DISTRIBUTION = 'docker-distribution'
@ -521,27 +522,31 @@ FILESYSTEM_NAME_DOCKER = 'docker'
FILESYSTEM_NAME_DOCKER_DISTRIBUTION = 'docker-distribution'
FILESYSTEM_NAME_EXTENSION = 'extension'
FILESYSTEM_NAME_ETCD = 'etcd'
FILESYSTEM_NAME_DC_VAULT = 'dc-vault'
FILESYSTEM_NAME_KUBELET = 'kubelet'
FILESYSTEM_NAME_IMAGE_CONVERSION = 'image-conversion'
FILESYSTEM_LV_DICT = {
FILESYSTEM_NAME_PLATFORM: 'platform-lv',
FILESYSTEM_NAME_BACKUP: 'backup-lv',
FILESYSTEM_NAME_SCRATCH: 'scratch-lv',
FILESYSTEM_NAME_IMAGE_CONVERSION: 'conversion-lv',
FILESYSTEM_NAME_DOCKER: 'docker-lv',
FILESYSTEM_NAME_DOCKER_DISTRIBUTION: 'dockerdistribution-lv',
FILESYSTEM_NAME_DATABASE: 'pgsql-lv',
FILESYSTEM_NAME_EXTENSION: 'extension-lv',
FILESYSTEM_NAME_ETCD: 'etcd-lv',
FILESYSTEM_NAME_DC_VAULT: 'dc-vault-lv',
FILESYSTEM_NAME_KUBELET: 'kubelet-lv',
}
FS_CREATION_ALLOWED = [FILESYSTEM_NAME_IMAGE_CONVERSION]
FILESYSTEM_CONTROLLER_SUPPORTED_LIST = [
FILESYSTEM_NAME_SCRATCH,
FILESYSTEM_NAME_BACKUP,
FILESYSTEM_NAME_DOCKER,
FILESYSTEM_NAME_KUBELET,
FILESYSTEM_NAME_IMAGE_CONVERSION,
]
FILESYSTEM_WORKER_SUPPORTED_LIST = [
@ -568,7 +573,7 @@ SUPPORTED_REPLICATED_FILEYSTEM_LIST = [
FILESYSTEM_NAME_PLATFORM,
FILESYSTEM_NAME_DATABASE,
FILESYSTEM_NAME_EXTENSION,
FILESYSTEM_NAME_DC_VAULT,
FILESYSTEM_NAME_ETCD,
FILESYSTEM_NAME_DOCKER_DISTRIBUTION,
]
@ -1051,6 +1056,9 @@ SERVICE_PARAM_NAME_OIDC_ISSUER_URL = 'oidc_issuer_url'
SERVICE_PARAM_NAME_OIDC_CLIENT_ID = 'oidc_client_id'
SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM = 'oidc_username_claim'
SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM = 'oidc_groups_claim'
SERVICE_PARAM_NAME_ADMISSION_PLUGINS = 'admission_plugins'
VALID_ADMISSION_PLUGINS = ['PodSecurityPolicy']
# ptp service parameters
SERVICE_PARAM_SECTION_PTP_GLOBAL = 'global'
@ -1217,6 +1225,7 @@ PARTITION_CMD_MODIFY = "modify"
# User creatable, system managed, GUID partitions types.
PARTITION_USER_MANAGED_GUID_PREFIX = "ba5eba11-0000-1111-2222-"
USER_PARTITION_PHYSICAL_VOLUME = PARTITION_USER_MANAGED_GUID_PREFIX + "000000000001"
# Note that 000000000002 is used for the persistent backup partition
LINUX_LVM_PARTITION = "e6d6d379-f507-44c2-a23c-238f2a3df928"
CEPH_DATA_PARTITION = "4fbd7e29-9d25-41b8-afd0-062c0ceff05d"
CEPH_JOURNAL_PARTITION = "45b0969e-9b03-4f30-b4c6-b4b80ceff106"
@ -1539,6 +1548,8 @@ DEFAULT_DNS_SERVICE_DOMAIN = 'cluster.local'
# Ansible bootstrap
ANSIBLE_BOOTSTRAP_FLAG = os.path.join(tsc.VOLATILE_PATH, ".ansible_bootstrap")
ANSIBLE_BOOTSTRAP_COMPLETED_FLAG = os.path.join(tsc.CONFIG_PATH,
".bootstrap_completed")
UNLOCK_READY_FLAG = os.path.join(tsc.PLATFORM_CONF_PATH, ".unlock_ready")
INVENTORY_WAIT_TIMEOUT_IN_SECS = 90
@ -1547,6 +1558,10 @@ ANSIBLE_KUBE_NETWORKING_PLAYBOOK = \
'/usr/share/ansible/stx-ansible/playbooks/upgrade-k8s-networking.yml'
ANSIBLE_KUBE_PUSH_IMAGES_PLAYBOOK = \
'/usr/share/ansible/stx-ansible/playbooks/push_k8s_images.yml'
ANSIBLE_PLATFORM_BACKUP_PLAYBOOK = \
'/usr/share/ansible/stx-ansible/playbooks/backup.yml'
ANSIBLE_KUBE_STATIC_IMAGES_PLAYBOOK = \
'/usr/share/ansible/stx-ansible/playbooks/upgrade-static-images.yml'
# Clock synchronization types
NTP = 'ntp'
@ -1599,3 +1614,6 @@ HOST_BM_VALID_PROVISIONED_TYPE_LIST = [HOST_BM_TYPE_DYNAMIC,
DEVICE_PLUGINS_FILE = "enabled_kube_plugins"
ENABLED_KUBE_PLUGINS = os.path.join(tsc.CONFIG_PATH, DEVICE_PLUGINS_FILE)
KUBE_INTEL_GPU_DEVICE_PLUGIN_LABEL = "intelgpu=enabled"
# Port on which ceph manager and ceph-mgr listens
CEPH_MGR_PORT = 7999

View File

@ -0,0 +1,26 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# PCI Device Class ID in hexadecimal string
PCI_DEVICE_CLASS_FPGA = '120000'
# Device Image
DEVICE_IMAGE_TMP_PATH = '/tmp/device_images'
DEVICE_IMAGE_PATH = '/opt/platform/device_images'
BITSTREAM_TYPE_ROOT_KEY = 'root-key'
BITSTREAM_TYPE_FUNCTIONAL = 'functional'
BITSTREAM_TYPE_KEY_REVOCATION = 'key-revocation'
# Device Image Status
DEVICE_IMAGE_UPDATE_PENDING = 'pending'
DEVICE_IMAGE_UPDATE_IN_PROGRESS = 'in-progress'
DEVICE_IMAGE_UPDATE_COMPLETED = 'completed'
DEVICE_IMAGE_UPDATE_FAILED = 'failed'
# Device Image Action
APPLY_ACTION = 'apply'
REMOVE_ACTION = 'remove'
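The status constants imply a simple lifecycle; a small illustrative helper (the transition map is an assumption for illustration, not defined by this change):

    # Assumed lifecycle: pending -> in-progress -> completed | failed.
    _NEXT_STATUS = {
        DEVICE_IMAGE_UPDATE_PENDING: {DEVICE_IMAGE_UPDATE_IN_PROGRESS},
        DEVICE_IMAGE_UPDATE_IN_PROGRESS: {DEVICE_IMAGE_UPDATE_COMPLETED,
                                          DEVICE_IMAGE_UPDATE_FAILED},
    }

    def can_transition(current, new):
        """Return True if moving from current to new status looks legal."""
        return new in _NEXT_STATUS.get(current, set())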

View File

@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013-2020 Wind River Systems, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@ -387,6 +387,10 @@ class PCIAddrAlreadyExists(Conflict):
"for %(host)s already exists.") "for %(host)s already exists.")
class PCIAddrNotFound(Conflict):
message = _("A Device with PCI address %(pciaddr)s could not be found.")
class LvmLvgAlreadyExists(Conflict):
message = _("LVM Local Volume Group %(name)s for %(host)s already exists.")
@ -1318,6 +1322,83 @@ class FilesystemAlreadyExists(Conflict):
class FilesystemNotFound(NotFound):
message = _("Host FS with id %(fs_id)s not found")
# Device image
class UnsupportedDeviceImageBitstreamType(Conflict):
message = _("Device image with bitstream type '%(bitstream_type)s' "
"is not supported.")
class DeviceImageNotFound(NotFound):
message = _("Device image %(deviceimage_uuid)s could not be found.")
class DeviceImageTypeNotFound(NotFound):
message = _("Device image of type %(bitstream_type)s could not be found.")
class DeviceImageIDNotFound(NotFound):
message = _("Device image with id %(id)s could not be found.")
class DeviceImageNameNotFound(NotFound):
message = _("Device image with name %(name)s could not be found.")
class DeviceImageAlreadyExists(Conflict):
message = _("Device image of name %(name)s already exists.")
class DeviceImageTypeUnsupported(Conflict):
message = _("Device image of type %(bitstream_type)s is not supported.")
# Device Label
class DeviceLabelNotFound(NotFound):
message = _("Device label %(uuid)s could not be found.")
class DeviceLabelAlreadyExists(Conflict):
message = _("Device label %(label)s already "
"exists on this host %(host)s.")
class DeviceLabelNotFoundByKey(NotFound):
message = _("Device label %(label)s could not be found.")
class DeviceLabelInvalid(Invalid):
message = _("Device label is invalid. Reason: %(reason)s")
# Device Image Label
class DeviceImageLabelNotFound(NotFound):
message = _("Device image label %(uuid)s could not be found.")
class DeviceImageLabelAlreadyExists(Conflict):
message = _("Device image is already applied to label %(uuid)s.")
class DeviceImageLabelNotFoundByKey(NotFound):
message = _("Device image %(image_id)s "
"and label ID %(label_id)s not found")
# Device Image State
class DeviceImageStateAlreadyExists(Conflict):
message = _(
"A device to image mapping with id %(uuid)s already exists.")
class DeviceImageStateNotFound(NotFound):
message = _("A device to image mapping with id %(id)s not found")
class DeviceImageStateNotFoundByKey(NotFound):
message = _("Device image %(image_id)s "
"and device ID %(device_id)s not found")
#
# Kubernetes application and Helm related exceptions
#
@ -1373,6 +1454,10 @@ class KubeNamespaceDeleteTimeout(SysinvException):
message = "Namespace %(name)s deletion timeout." message = "Namespace %(name)s deletion timeout."
class KubePodTerminateTimeout(SysinvException):
message = "Namespace %(name)s pod termination timeout."
class KubePodDeleteTimeout(SysinvException):
message = "Pod %(namespace)s/%(name)s deletion timeout."
@ -1439,3 +1524,8 @@ class KubeUpgradeNotFound(NotFound):
class KubeVersionNotFound(NotFound):
message = _("Kubernetes version %(version)s not found")
class KubeNotConfigured(SysinvException):
message = _("Kubernetes is not configured. API operations "
"will not be available.")

View File

@ -6,9 +6,6 @@
from eventlet.green import subprocess
import os
from oslo_log import log
from sysinv._i18n import _
from sysinv.common import ceph
@ -99,20 +96,15 @@ class Health(object):
def _check_alarms(self, context, force=False):
"""Checks that no alarms are active"""
alarms = fmclient(context).alarm.list(include_suppress=True)
success = True
allowed = 0
affecting = 0
# Separate alarms that are mgmt affecting
for alarm in alarms:
mgmt_affecting = alarm.mgmt_affecting == "True"
if not mgmt_affecting:
allowed += 1
if not force:
success = False
@ -125,18 +117,13 @@ class Health(object):
def get_alarms_degrade(self, context, alarm_ignore_list=None,
entity_instance_id_filter=""):
"""Return all the alarms that cause the degrade"""
alarms = fmclient(context).alarm.list(include_suppress=True)
degrade_alarms = []
if alarm_ignore_list is None:
alarm_ignore_list = []
for alarm in alarms:
degrade_affecting = alarm.degrade_affecting
# Ignore alarms that are part of the ignore list sent as parameter
# and also filter the alarms based on entity instance id.
# If multiple alarms with the same ID exist, we only return the ID
@ -154,16 +141,12 @@ class Health(object):
def _check_license(self, version):
"""Validates the current license is valid for the specified version"""
check_binary = "/usr/bin/verify-license"
license_file = '/etc/platform/.license'
with open(os.devnull, "w") as fnull:
try:
subprocess.check_call([check_binary, license_file, version],
stdout=fnull, stderr=fnull)
except subprocess.CalledProcessError:
return False
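Equivalently, as a standalone call (binary path and argument order taken from the code above; Python 3 assumed):

    import subprocess

    def license_ok(version, license_file='/etc/platform/.license'):
        """Return True when verify-license accepts the installed license."""
        try:
            subprocess.check_call(['/usr/bin/verify-license', license_file, version],
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
            return True
        except (subprocess.CalledProcessError, OSError):
            return False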
@ -205,17 +188,6 @@ class Health(object):
success = running_instances == 0
return success, running_instances
def _check_simplex_available_space(self):
"""Ensures there is free space for the backup"""
# TODO: Switch this over to use Ansible
# try:
# backup_restore.check_size("/opt/backups", True)
# except backup_restore.BackupFail:
# return False
# return True
LOG.info("Skip the check of the enough free space.")
def _check_kube_nodes_ready(self):
"""Checks that each kubernetes node is ready"""
fail_node_list = []
@ -419,13 +391,6 @@ class Health(object):
% (running_instances)
health_ok = health_ok and success
else:
success = self._check_simplex_available_space()
output += \
_('Sufficient free space for upgrade: [%s]\n') \
% (Health.SUCCESS_MSG if success else Health.FAIL_MSG)
health_ok = health_ok and success
return health_ok, output

View File

@ -14,6 +14,7 @@
from __future__ import absolute_import
from distutils.version import LooseVersion
import json
import os
import re
from kubernetes import config
@ -27,6 +28,9 @@ from sysinv.common import exception
LOG = logging.getLogger(__name__)
# Kubernetes Files
KUBERNETES_ADMIN_CONF = '/etc/kubernetes/admin.conf'
# Possible states for each supported kubernetes version
KUBE_STATE_AVAILABLE = 'available'
KUBE_STATE_ACTIVE = 'active'
@ -73,7 +77,7 @@ POD_START_INTERVAL = 10
def get_kube_versions():
"""Provides a list of supported kubernetes versions."""
return [
{'version': 'v1.18.1',
'upgrade_from': [],
'downgrade_to': [],
'applied_patches': [],
@ -113,6 +117,13 @@ def get_kube_networking_upgrade_version(kube_upgrade):
return kube_upgrade.to_version
def is_k8s_configured():
"""Check to see if the k8s admin config file exists."""
if os.path.isfile(KUBERNETES_ADMIN_CONF):
return True
return False
class KubeOperator(object):
def __init__(self):
@ -121,7 +132,10 @@ class KubeOperator(object):
self._kube_client_custom_objects = None
def _load_kube_config(self):
if not is_k8s_configured():
raise exception.KubeNotConfigured()
config.load_kube_config(KUBERNETES_ADMIN_CONF)
# Workaround: Turn off SSL/TLS verification
c = Configuration()
@ -171,6 +185,21 @@ class KubeOperator(object):
LOG.error("Kubernetes exception in kube_get_nodes: %s" % e) LOG.error("Kubernetes exception in kube_get_nodes: %s" % e)
raise raise
def kube_namespaced_pods_exist(self, namespace):
LOG.debug("kube_namespaced_pods_exist, namespace=%s" %
(namespace))
try:
api_response = self._get_kubernetesclient_core().list_namespaced_pod(
namespace)
if api_response.items:
return True
else:
return False
except ApiException as e:
LOG.error("Kubernetes exception in list_namespaced_pod: %s" % e)
raise
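A hedged standalone version of the same probe using the kubernetes client directly (assumes the admin kubeconfig exists and the cluster is reachable; the namespace is illustrative):

    from kubernetes import client, config

    config.load_kube_config('/etc/kubernetes/admin.conf')
    core_v1 = client.CoreV1Api()
    # True when at least one pod still exists in the namespace.
    print(bool(core_v1.list_namespaced_pod('kube-system').items))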
def kube_get_image_by_selector(self, template_name, namespace, container_name):
LOG.debug("kube_get_image_by_selector template_name=%s, namespace=%s" %
(template_name, namespace))

View File

@ -324,6 +324,19 @@ def _validate_domain(name, value):
(name, value)))
def _validate_admission_plugins(name, value):
"""Check if specified plugins are supported"""
if not value:
raise wsme.exc.ClientSideError(_(
"Please specify at least 1 plugin"))
plugins = value.split(',')
for plugin in plugins:
if plugin not in constants.VALID_ADMISSION_PLUGINS:
raise wsme.exc.ClientSideError(_(
"Invalid admission plugin: '%s'" % plugin))
IDENTITY_CONFIG_PARAMETER_OPTIONAL = [
constants.SERVICE_PARAM_IDENTITY_CONFIG_TOKEN_EXPIRATION,
]
@ -534,10 +547,12 @@ KUBERNETES_APISERVER_PARAMETER_OPTIONAL = [
constants.SERVICE_PARAM_NAME_OIDC_CLIENT_ID,
constants.SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM,
constants.SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM,
constants.SERVICE_PARAM_NAME_ADMISSION_PLUGINS,
]
KUBERNETES_APISERVER_PARAMETER_VALIDATOR = {
constants.SERVICE_PARAM_NAME_OIDC_ISSUER_URL: _validate_oidc_issuer_url,
constants.SERVICE_PARAM_NAME_ADMISSION_PLUGINS: _validate_admission_plugins,
}
KUBERNETES_APISERVER_PARAMETER_RESOURCE = {
@ -549,6 +564,8 @@ KUBERNETES_APISERVER_PARAMETER_RESOURCE = {
'platform::kubernetes::params::oidc_username_claim',
constants.SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM:
'platform::kubernetes::params::oidc_groups_claim',
constants.SERVICE_PARAM_NAME_ADMISSION_PLUGINS:
'platform::kubernetes::params::admission_plugins',
}
HTTPD_PORT_PARAMETER_OPTIONAL = [

View File

@ -17,7 +17,6 @@ import pecan
from oslo_log import log
from sysinv.common import constants
from sysinv.common import exception
LOG = log.getLogger(__name__)
@ -485,20 +484,3 @@ class K8RbdProvisioner(object):
base_name = 'ceph-pool'
return str(base_name + '-' + name)
@staticmethod
def get_k8s_secret(secret_name, namespace=None):
try:
cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
'get', 'secrets', secret_name]
if namespace:
cmd.append('--namespace=%s' % namespace)
stdout, _ = cutils.execute(*cmd, run_as_root=False)
except exception.ProcessExecutionError as e:
if "not found" in e.stderr.lower():
return None
raise exception.SysinvException(
"Error getting secret: %s in namespace: %s, "
"Details: %s" % (secret_name, namespace, str(e)))
return stdout

View File

@ -2216,3 +2216,11 @@ def extract_certs_from_pem(pem_contents):
certs.append(cert)
start = start + index + len(marker)
return certs
def format_image_filename(device_image):
""" Format device image filename """
return "{}-{}-{}-{}.bit".format(device_image.bitstream_type,
device_image.pci_vendor,
device_image.pci_device,
device_image.uuid)
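So a functional image serializes as "<type>-<vendor>-<device>-<uuid>.bit"; a runnable sketch (the vendor/device/uuid values are illustrative):

    from collections import namedtuple

    DeviceImage = namedtuple('DeviceImage',
                             ['bitstream_type', 'pci_vendor', 'pci_device', 'uuid'])

    img = DeviceImage('functional', '8086', '0b30',
                      '11111111-2222-3333-4444-555555555555')
    print(format_image_filename(img))
    # -> functional-8086-0b30-11111111-2222-3333-4444-555555555555.bit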

View File

@ -49,7 +49,7 @@ class CephOperator(object):
self._fm_api = fm_api.FaultAPIs()
self._db_api = db_api
self._ceph_api = ceph.CephWrapper(
endpoint='http://localhost:{}'.format(constants.CEPH_MGR_PORT))
self._db_cluster = None
self._db_primary_tier = None
self._cluster_name = 'ceph_cluster'

View File

@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -41,6 +41,7 @@ from sysinv.common import image_versions
from sysinv.common.retrying import retry
from sysinv.common import utils as cutils
from sysinv.common.storage_backend_conf import K8RbdProvisioner
from sysinv.common.storage_backend_conf import StorageBackendConfig
from sysinv.conductor import kube_pod_helper as kube_pod
from sysinv.conductor import openstack
from sysinv.helm import common
@ -707,7 +708,6 @@ class AppOperator(object):
start = time.time()
try:
with self._lock:
self._docker._retrieve_specified_registries()
except Exception as e:
@ -719,7 +719,7 @@ class AppOperator(object):
pool = greenpool.GreenPool(size=threads)
for tag, success in pool.imap(
functools.partial(self._docker.download_an_image,
app.name),
images_to_download):
if success:
continue
@ -915,10 +915,17 @@ class AppOperator(object):
if null_labels:
self._update_kubernetes_labels(host.hostname, null_labels)
def _rbd_provisioner_required(self, app_name):
""" Check if Ceph's RBD provisioner is required """
# Since RBD provisioner requires Ceph, return false when not enabled
if not StorageBackendConfig.has_backend(
self._dbapi,
constants.SB_TYPE_CEPH
):
return False
check_rbd_provisioner_apps = [constants.HELM_APP_MONITOR]
if app_name not in check_rbd_provisioner_apps:
return True
system = self._dbapi.isystem_get_one()
@ -928,8 +935,8 @@ class AppOperator(object):
else:
return True
def _create_rbd_provisioner_secrets(self, app_name):
""" Provide access to the system persistent RBD provisioner.
The rbd-provisioner is installed as part of system provisioning and has
created secrets for all common default namespaces. Copy the secret to
@ -947,7 +954,7 @@ class AppOperator(object):
list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
for ns in namespaces:
if (ns in [common.HELM_NS_HELM_TOOLKIT,
common.HELM_NS_RBD_PROVISIONER] or
self._kube.kube_get_secret(pool_secret, ns) is not None):
# Secret already exists
continue
@ -956,13 +963,13 @@ class AppOperator(object):
if not self._kube.kube_get_namespace(ns):
self._kube.kube_create_namespace(ns)
self._kube.kube_copy_secret(
pool_secret, common.HELM_NS_RBD_PROVISIONER, ns)
except Exception as e:
LOG.error(e)
raise
def _delete_rbd_provisioner_secrets(self, app_name):
""" Remove access to the system persistent RBD provisioner.
As part of launching a supported application, secrets were created to
allow access to the provisioner from the application namespaces. This
@ -981,7 +988,7 @@ class AppOperator(object):
for ns in namespaces:
if (ns == common.HELM_NS_HELM_TOOLKIT or
ns == common.HELM_NS_RBD_PROVISIONER):
continue
try:
@ -1163,6 +1170,28 @@ class AppOperator(object):
LOG.error(e)
raise
def _wait_for_pod_termination(self, namespace):
loop_timeout = 0
loop_check_interval = 10
timeout = 300
try:
LOG.info("Waiting for pod termination in namespace %s ..." % namespace)
# Pod termination timeout 5mins
while(loop_timeout <= timeout):
if not self._kube.kube_namespaced_pods_exist(namespace):
# Pods have terminated
break
loop_timeout += loop_check_interval
time.sleep(loop_check_interval)
if loop_timeout > timeout:
raise exception.KubePodTerminateTimeout(name=namespace)
LOG.info("Pod termination in Namespace %s completed." % namespace)
except Exception as e:
LOG.error(e)
raise
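The wait loop above is a generic poll-until pattern; a minimal standalone equivalent (timeout and interval values mirror the method above):

    import time

    def wait_until(predicate, timeout=300, interval=10):
        """Poll predicate() until it returns True or timeout (seconds) elapses."""
        waited = 0
        while waited <= timeout:
            if predicate():
                return
            waited += interval
            time.sleep(interval)
        raise RuntimeError("condition not met within %s seconds" % timeout)

    # e.g. wait_until(lambda: not pods_exist('monitor'))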
def _delete_persistent_volume_claim(self, namespace):
try:
LOG.info("Deleting Persistent Volume Claim "
@ -1396,7 +1425,7 @@ class AppOperator(object):
        # This function gets the "maintain_user_overrides"
        # parameter from application metadata
        reuse_overrides = False
-        metadata_file = os.path.join(app.path,
+        metadata_file = os.path.join(app.inst_path,
                                     constants.APP_METADATA_FILE)
        if os.path.exists(metadata_file) and os.path.getsize(metadata_file) > 0:
            with open(metadata_file, 'r') as f:
@ -1617,7 +1646,7 @@ class AppOperator(object):
            # Copy the latest config map
            self._kube.kube_copy_config_map(
                self.APP_OPENSTACK_RESOURCE_CONFIG_MAP,
-                common.HELM_NS_STORAGE_PROVISIONER,
+                common.HELM_NS_RBD_PROVISIONER,
                common.HELM_NS_OPENSTACK)
        except Exception as e:
            LOG.error(e)
@ -1650,9 +1679,11 @@ class AppOperator(object):
        if (app_name == constants.HELM_APP_OPENSTACK and
                operation_type == constants.APP_REMOVE_OP):
            _delete_ceph_persistent_volume_claim(common.HELM_NS_OPENSTACK)
-        elif (app_name == constants.HELM_APP_MONITOR and
-              operation_type == constants.APP_DELETE_OP):
-            _delete_ceph_persistent_volume_claim(common.HELM_NS_MONITOR)
+        elif app_name == constants.HELM_APP_MONITOR:
+            if operation_type == constants.APP_DELETE_OP:
+                _delete_ceph_persistent_volume_claim(common.HELM_NS_MONITOR)
+            elif (operation_type == constants.APP_REMOVE_OP):
+                self._wait_for_pod_termination(common.HELM_NS_MONITOR)
    def _perform_app_recover(self, old_app, new_app, armada_process_required=True):
        """Perform application recover
@ -2037,7 +2068,7 @@ class AppOperator(object):
            True)
        self.clear_reapply(app.name)

-        # WORKAROUND: For k8s MatchNodeSelector issue. Look for and clean up any
+        # WORKAROUND: For k8s NodeAffinity issue. Look for and clean up any
        # pods that could block manifest apply
        #
        # Upstream reports of this:
@ -2046,7 +2077,7 @@ class AppOperator(object):
        #
        # Outstanding PR that was tested and fixed this issue:
        # - https://github.com/kubernetes/kubernetes/pull/80976
-        self._kube_pod.delete_failed_pods_by_reason(reason='MatchNodeSelector')
+        self._kube_pod.delete_failed_pods_by_reason(reason='NodeAffinity')

        LOG.info("Application %s (%s) apply started." % (app.name, app.version))
@ -2059,8 +2090,8 @@ class AppOperator(object):
                if AppOperator.is_app_aborted(app.name):
                    raise exception.KubeAppAbort()

-                if self._storage_provisioner_required(app.name):
-                    self._create_storage_provisioner_secrets(app.name)
+                if self._rbd_provisioner_required(app.name):
+                    self._create_rbd_provisioner_secrets(app.name)
                self._create_app_specific_resources(app.name)

            self._update_app_status(
@ -2340,8 +2371,8 @@ class AppOperator(object):
            try:
                self._delete_local_registry_secrets(app.name)
                if app.system_app:
-                    if self._storage_provisioner_required(app.name):
-                        self._delete_storage_provisioner_secrets(app.name)
+                    if self._rbd_provisioner_required(app.name):
+                        self._delete_rbd_provisioner_secrets(app.name)
                    self._delete_app_specific_resources(app.name, constants.APP_REMOVE_OP)
            except Exception as e:
                self._abort_operation(app, constants.APP_REMOVE_OP)
@ -2734,7 +2765,7 @@ class DockerHelper(object):
            # is a work around the permission issue in Armada container.
            kube_config = os.path.join(constants.APP_SYNCED_ARMADA_DATA_PATH,
                                       'admin.conf')
-            shutil.copy('/etc/kubernetes/admin.conf', kube_config)
+            shutil.copy(kubernetes.KUBERNETES_ADMIN_CONF, kube_config)
            os.chown(kube_config, 1000, grp.getgrnam("sys_protected").gr_gid)

            overrides_dir = common.HELM_OVERRIDES_PATH
@ -2776,6 +2807,9 @@ class DockerHelper(object):
                    command=None)
                LOG.info("Armada service started!")
                return container
+        except IOError as ie:
+            if not kubernetes.is_k8s_configured():
+                LOG.error("Unable to start Armada service: %s" % ie)
        except OSError as oe:
            LOG.error("Unable to make kubernetes config accessible to "
                      "armada: %s" % oe)
@ -2931,7 +2965,7 @@ class DockerHelper(object):
            # Failed to get a docker client
            LOG.error("Failed to stop Armada service : %s " % e)

-    def download_an_image(self, app_name, local_registry_auth, img_tag):
+    def download_an_image(self, app_name, img_tag):
        rc = True
@ -2944,6 +2978,7 @@ class DockerHelper(object):
LOG.info("Image %s download started from local registry" % img_tag) LOG.info("Image %s download started from local registry" % img_tag)
client = docker.APIClient(timeout=INSTALLATION_TIMEOUT) client = docker.APIClient(timeout=INSTALLATION_TIMEOUT)
local_registry_auth = cutils.get_local_docker_registry_auth()
auth = '{0}:{1}'.format(local_registry_auth['username'], auth = '{0}:{1}'.format(local_registry_auth['username'],
local_registry_auth['password']) local_registry_auth['password'])
subprocess.check_call(["crictl", "pull", "--creds", auth, img_tag]) subprocess.check_call(["crictl", "pull", "--creds", auth, img_tag])
@ -2966,6 +3001,9 @@ class DockerHelper(object):
                    try:
                        # Tag and push the image to the local registry
                        client.tag(target_img_tag, img_tag)
+                        # The admin password may be changed by an openstack
+                        # client command in parallel, so the auth info cannot
+                        # be cached; refresh it on every push.
+                        local_registry_auth = cutils.get_local_docker_registry_auth()
                        client.push(img_tag, auth_config=local_registry_auth)
                    except Exception as e:
                        rc = False
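The refresh-before-push pattern called out in the comment generalizes to any retry loop around the local registry. A minimal standalone sketch of the idea; `get_local_docker_registry_auth` follows this diff, while the retry loop, import path, and function name are assumptions for illustration:

import docker

from sysinv.common import utils as cutils

def push_with_fresh_auth(img_tag, attempts=3):
    # Re-read registry credentials on every attempt, since another
    # client may rotate the admin password while we are retrying.
    client = docker.APIClient()
    for _attempt in range(attempts):
        auth = cutils.get_local_docker_registry_auth()  # never cached
        try:
            client.push(img_tag, auth_config=auth)
            return True
        except Exception:
            continue
    return False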

View File

@ -68,6 +68,7 @@ from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from platform_util.license import license
+from ruamel import yaml
from sqlalchemy.orm import exc
from six.moves import http_client as httplib
from sysinv._i18n import _
@ -80,6 +81,7 @@ from sysinv.api.controllers.v1 import utils
from sysinv.api.controllers.v1 import vim_api
from sysinv.common import constants
from sysinv.common import ceph as cceph
+from sysinv.common import device as dconstants
from sysinv.common import exception
from sysinv.common import image_versions
from sysinv.common import fm
@ -182,7 +184,7 @@ class ConductorManager(service.PeriodicService):
        self._app = None
        self._ceph = None
        self._ceph_api = ceph.CephWrapper(
-            endpoint='http://localhost:5001')
+            endpoint='http://localhost:{}'.format(constants.CEPH_MGR_PORT))
        self._kube = None
        self._kube_pod = None
        self._fernet = None
@ -953,32 +955,27 @@ class ConductorManager(service.PeriodicService):
        install_output_arg = "-t"
        install_opts += [install_output_arg]

-        # This version check MUST be present. The -u option does not exist
-        # prior to v17.00. This method is also called during upgrades to
+        # This method is called during upgrades to
        # re-generate the host's pxe config files to the appropriate host's
        # software version. It is required specifically when we downgrade a
        # host or when we lock/unlock a host.
-        if sw_version != tsc.SW_VERSION_1610:
-            host_uuid = host.get('uuid')
-            notify_url = \
-                "http://pxecontroller:%d/v1/ihosts/%s/install_progress" % \
-                (CONF.sysinv_api_port, host_uuid)
-            install_opts += ['-u', notify_url]
+        host_uuid = host.get('uuid')
+        notify_url = \
+            "http://pxecontroller:%d/v1/ihosts/%s/install_progress" % \
+            (CONF.sysinv_api_port, host_uuid)
+        install_opts += ['-u', notify_url]

        system = self.dbapi.isystem_get_one()

-        # This version check MUST be present. The -s option
-        # (security profile) does not exist 17.06 and below.
-        if sw_version != tsc.SW_VERSION_1706:
-            secprofile = system.security_profile
-            # ensure that the security profile selection is valid
-            if secprofile not in [constants.SYSTEM_SECURITY_PROFILE_STANDARD,
-                                  constants.SYSTEM_SECURITY_PROFILE_EXTENDED]:
-                LOG.error("Security Profile (%s) not a valid selection. "
-                          "Defaulting to: %s" % (secprofile,
-                          constants.SYSTEM_SECURITY_PROFILE_STANDARD))
-                secprofile = constants.SYSTEM_SECURITY_PROFILE_STANDARD
-            install_opts += ['-s', secprofile]
+        secprofile = system.security_profile
+        # ensure that the security profile selection is valid
+        if secprofile not in [constants.SYSTEM_SECURITY_PROFILE_STANDARD,
+                              constants.SYSTEM_SECURITY_PROFILE_EXTENDED]:
+            LOG.error("Security Profile (%s) not a valid selection. "
+                      "Defaulting to: %s" % (secprofile,
+                      constants.SYSTEM_SECURITY_PROFILE_STANDARD))
+            secprofile = constants.SYSTEM_SECURITY_PROFILE_STANDARD
+        install_opts += ['-s', secprofile]
        # If 'console' is not present in ihost_obj, we want to use the default.
        # If, however, it is present and is explicitly set to None or "", then
@ -996,11 +993,7 @@ class ConductorManager(service.PeriodicService):
        if tboot is not None and tboot != "":
            install_opts += ['-T', tboot]

-        # This version check MUST be present. The -k option
-        # (extra_kernel_args) does not exist 18.03 and below.
-        if sw_version != tsc.SW_VERSION_1706 and \
-                sw_version != tsc.SW_VERSION_1803:
-            install_opts += ['-k', system.security_feature]
+        install_opts += ['-k', system.security_feature]
        base_url = "http://pxecontroller:%d" % cutils.get_http_port(self.dbapi)
        install_opts += ['-l', base_url]
@ -1321,9 +1314,9 @@ class ConductorManager(service.PeriodicService):
        :param context: request context
        :param host: host object
        """
-        # Only update the config if the host is running the same version as
-        # the active controller.
        if self.host_load_matches_sw_version(host):
+            # update the config if the host is running the same version as
+            # the active controller.
            if (host.administrative == constants.ADMIN_UNLOCKED or
                    host.action == constants.FORCE_UNLOCK_ACTION or
                    host.action == constants.UNLOCK_ACTION):
@ -1331,8 +1324,20 @@ class ConductorManager(service.PeriodicService):
                # Update host configuration
                self._puppet.update_host_config(host)
        else:
-            LOG.info("Host %s is not running active load. "
-                     "Skipping manifest generation" % host.hostname)
+            # from the active controller, update hieradata for the upgrade
+            host_uuids = [host.uuid]
+            config_uuid = self._config_update_hosts(
+                context,
+                [constants.CONTROLLER],
+                host_uuids,
+                reboot=True)
+            host_upgrade = self.dbapi.host_upgrade_get_by_host(host.id)
+            target_load = self.dbapi.load_get(host_upgrade.target_load)
+            self._puppet.update_host_config_upgrade(
+                host,
+                target_load.software_version,
+                config_uuid
+            )

        self._allocate_addresses_for_host(context, host)
        # Set up the PXE config file for this host so it can run the installer
@ -1589,7 +1594,6 @@ class ConductorManager(service.PeriodicService):
        if (host.administrative == constants.ADMIN_UNLOCKED or
                host.action == constants.FORCE_UNLOCK_ACTION or
                host.action == constants.UNLOCK_ACTION):
            # Generate host configuration files
            self._puppet.update_host_config(host)
        else:
@ -2186,9 +2190,27 @@ class ConductorManager(service.PeriodicService):
        tlvs = self.dbapi.lldp_tlv_get_by_neighbour(neighbour_uuid)
        for k, v in tlv_dict.items():
+            # Since "dot1_vlan_names" has a 255-char limit in the DB, it
+            # is necessary to ensure the vlan list from the tlv
+            # packets does not have a length greater than 255 before
+            # shoving it into the DB
+            if k == constants.LLDP_TLV_TYPE_DOT1_VLAN_NAMES:
+                # trim the listed vlans to 252 chars max
+                if len(v) >= 256:
+                    # if not a perfect trim, remove the incomplete ending
+                    perfect_trim = v[252] in list(', ')
+                    v = v[:252]
+                    if not perfect_trim:
+                        v = v[:v.rfind(',') + 1]
+                    # add '...' to indicate there's more
+                    v += '...'
+                    LOG.info("tlv_value trimmed: %s", v)
            for tlv in tlvs:
                if tlv['type'] == k:
-                    tlv_value = tlv_dict.get(tlv['type'])
+                    tlv_value = v
                    entry = {'type': tlv['type'],
                             'value': tlv_value}
                    if tlv['value'] != tlv_value:
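The trimming rule above is easier to follow in isolation. A minimal sketch of the same logic; the 252/255 limits and the trailing '...' marker come from the diff, while the function name is illustrative:

def trim_vlan_names(value, keep=252):
    # Trim a comma-separated vlan list to fit a 255-char DB column:
    # keep at most `keep` chars, drop a partially-cut trailing name,
    # and append '...' to flag that the list was truncated.
    if len(value) < 256:
        return value
    perfect_trim = value[keep] in ', '   # the cut landed on a separator
    value = value[:keep]
    if not perfect_trim:
        value = value[:value.rfind(',') + 1]
    return value + '...'

assert trim_vlan_names('vlan-a,vlan-b') == 'vlan-a,vlan-b'
assert len(trim_vlan_names('vlan-1,' * 60)) <= 255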
@ -4925,6 +4947,9 @@ class ConductorManager(service.PeriodicService):
        # Audit kubernetes node labels
        self._audit_kubernetes_labels(hosts)

+        # Audit image conversion
+        self._audit_image_conversion(hosts)
+
        for host in hosts:
            # only audit configured hosts
            if not host.personality:
@ -4994,6 +5019,49 @@ class ConductorManager(service.PeriodicService):
            elif bk.backend in self._stor_bck_op_timeouts:
                del self._stor_bck_op_timeouts[bk.backend]
def _audit_image_conversion(self, hosts):
"""
Raise alarm if:
- image-conversion is not added on both controllers;
- the size of the filesystem is not the same
on both controllers
"""
chosts = [h for h in hosts if h.personality == constants.CONTROLLER]
if len(chosts) <= 1:
# No alarm is raised if setup has only one controller
return
conversion_list = []
for host in chosts:
hostfs_list = self.dbapi.host_fs_get_by_ihost(host.uuid)
for host_fs in hostfs_list:
if host_fs['name'] == constants.FILESYSTEM_NAME_IMAGE_CONVERSION:
conversion_list.append(host_fs['size'])
reason_text = "image-conversion must be added on both controllers"
if not conversion_list:
# If no conversion filesystem is present on any host
# any alarm present is cleared
self._update_image_conversion_alarm(fm_constants.FM_ALARM_STATE_CLEAR,
constants.FILESYSTEM_NAME_IMAGE_CONVERSION)
elif (len(conversion_list) == 1):
self._update_image_conversion_alarm(fm_constants.FM_ALARM_STATE_SET,
constants.FILESYSTEM_NAME_IMAGE_CONVERSION,
reason_text)
else:
# If conversion filesystem is present on both controllers
# with different sizes
self._update_image_conversion_alarm(fm_constants.FM_ALARM_STATE_CLEAR,
constants.FILESYSTEM_NAME_IMAGE_CONVERSION)
if (conversion_list[0] != conversion_list[1]):
reason_text = "image-conversion size must be the same on both controllers"
self._update_image_conversion_alarm(fm_constants.FM_ALARM_STATE_SET,
constants.FILESYSTEM_NAME_IMAGE_CONVERSION,
reason_text)
elif conversion_list[0] == conversion_list[1]:
self._update_image_conversion_alarm(fm_constants.FM_ALARM_STATE_CLEAR,
constants.FILESYSTEM_NAME_IMAGE_CONVERSION)
    def _auto_upload_managed_app(self, context, app_name):
        if self._patching_operation_is_occurring():
            return
@ -5193,9 +5261,11 @@ class ConductorManager(service.PeriodicService):
                (active_ctrl.operational != constants.OPERATIONAL_ENABLED))):
            return
-        # WORKAROUND: For k8s MatchNodeSelector issue. Call this for a limited
-        # time (5 times over ~5 minutes) on a AIO-SX controller
-        # configuration after conductor startup.
+        # WORKAROUND: For k8s NodeAffinity issue. Call this for a limited time
+        # (5 times over ~5 minutes). As of the k8s upgrade to v1.18.1,
+        # this condition is occurring on simplex and duplex
+        # controller scenarios and has been observed with initial
+        # unlocks and uncontrolled system reboots
        #
        # Upstream reports of this:
        # - https://github.com/kubernetes/kubernetes/issues/80745
@ -5203,16 +5273,14 @@ class ConductorManager(service.PeriodicService):
        #
        # Outstanding PR that was tested and fixed this issue:
        # - https://github.com/kubernetes/kubernetes/pull/80976
-        system_mode = self.dbapi.isystem_get_one().system_mode
-        if system_mode == constants.SYSTEM_MODE_SIMPLEX:
-            if (self._start_time + timedelta(minutes=5) >
-                    datetime.now(self._start_time.tzinfo)):
-                LOG.info("Periodic Task: _k8s_application_audit: Checking for "
-                         "MatchNodeSelector issue for %s" % str(
-                             (self._start_time + timedelta(minutes=5)) -
-                             datetime.now(self._start_time.tzinfo)))
-                self._kube_pod.delete_failed_pods_by_reason(
-                    reason='MatchNodeSelector')
+        if (self._start_time + timedelta(minutes=5) >
+                datetime.now(self._start_time.tzinfo)):
+            LOG.info("Periodic Task: _k8s_application_audit: Checking for "
+                     "NodeAffinity issue for %s" % str(
+                         (self._start_time + timedelta(minutes=5)) -
+                         datetime.now(self._start_time.tzinfo)))
+            self._kube_pod.delete_failed_pods_by_reason(
+                reason='NodeAffinity')
        # Check the application state and take the appropriate action
        for app_name in constants.HELM_APPS_PLATFORM_MANAGED:
@ -5276,7 +5344,63 @@ class ConductorManager(service.PeriodicService):
                return
            self.reapply_app(context, app_name)
def _patch_tiller_deployment(self):
""" Ensure tiller is patched with restart logic."""
LOG.info("Attempt to patch tiller deployment")
try:
# We have a race condition that may cause the tiller pod to not have
# its environment set up correctly. This will patch the tiller
# deployment to ensure that tiller can recover if that occurs. The
# deployment is patched during the initial ansible run. This will
# re-patch the deployment in the case when tiller has been removed
# and reinstalled in the cluster after the system has been
# installed. If tiller is already patched then the patch execution
# is successful causing no change to the deployment. Specify the
# update strategy to allow tiller deployment patching in a simplex
# controller configuration.
patch = {
'spec': {
'strategy': {
'type': 'RollingUpdate',
'rollingUpdate': {
'maxUnavailable': 1,
'maxSurge': 1,
}
},
'template': {
'spec': {
'containers': [{
'name': 'tiller',
'command': [
'/bin/sh',
'-cex',
'#!/bin/sh\n'
'env | grep -q -e ^TILLER_DEPLOY || exit\n'
'env | grep -q -e ^KUBE_DNS || exit\n'
'env | grep -q -e ^KUBERNETES_PORT || exit\n'
'env | grep -q -e ^KUBERNETES_SERVICE || exit\n'
'/tiller\n'
]
}]
}
}
}
}
cmd = ['kubectl',
'--kubeconfig={}'.format(kubernetes.KUBERNETES_ADMIN_CONF),
'patch', 'deployment', '-n', 'kube-system', 'tiller-deploy',
'-p', yaml.dump(patch)]
stdout, stderr = cutils.execute(*cmd, run_as_root=False)
except exception.ProcessExecutionError as e:
raise exception.SysinvException(
_("Error patching the tiller deployment, "
"Details: %s") % str(e))
LOG.info("Tiller deployment has been patched")
    def _upgrade_downgrade_kube_components(self):
+        self._upgrade_downgrade_static_images()
        self._upgrade_downgrade_tiller()
        self._upgrade_downgrade_kube_networking()
@ -5342,13 +5466,11 @@ class ConductorManager(service.PeriodicService):
"Upgrade in progress." "Upgrade in progress."
% image_versions.TILLER_IMAGE_VERSION) % image_versions.TILLER_IMAGE_VERSION)
download_image = running_image_name + ":" + image_versions.TILLER_IMAGE_VERSION download_image = running_image_name + ":" + image_versions.TILLER_IMAGE_VERSION
local_registry_auth = cutils.get_local_docker_registry_auth()
self._docker._retrieve_specified_registries() self._docker._retrieve_specified_registries()
# download the image # download the image
try: try:
img_tag, ret = self._docker.download_an_image("helm", img_tag, ret = self._docker.download_an_image("helm",
local_registry_auth,
download_image) download_image)
if not ret: if not ret:
raise Exception raise Exception
@ -5371,6 +5493,16 @@ class ConductorManager(service.PeriodicService):
LOG.error("{}. Failed to upgrade/downgrade tiller.".format(e)) LOG.error("{}. Failed to upgrade/downgrade tiller.".format(e))
return False return False
# Patch tiller to allow restarts if the environment is incomplete
#
# NOTE: This patch along with this upgrade functionality can be removed
# once StarlingX moves to Helm v3
try:
self._patch_tiller_deployment()
except Exception as e:
LOG.error("{}. Failed to patch tiller deployment.".format(e))
return False
        return True
    @retry(retry_on_result=lambda x: x is False,
@ -5413,6 +5545,46 @@ class ConductorManager(service.PeriodicService):
        return True
@retry(retry_on_result=lambda x: x is False,
wait_fixed=(CONF.conductor.kube_upgrade_downgrade_retry_interval * 1000))
def _upgrade_downgrade_static_images(self):
try:
# Get the kubernetes version from the upgrade table
# if an upgrade exists
kube_upgrade = self.dbapi.kube_upgrade_get_one()
kube_version = \
kubernetes.get_kube_networking_upgrade_version(kube_upgrade)
except exception.NotFound:
# Not upgrading kubernetes, get the kubernetes version
# from the kubeadm config map
kube_version = self._kube.kube_get_kubernetes_version()
if not kube_version:
LOG.error("Unable to get the current kubernetes version.")
return False
try:
LOG.info("_upgrade_downgrade_kube_static_images executing"
" playbook: %s for version %s" %
(constants.ANSIBLE_KUBE_STATIC_IMAGES_PLAYBOOK, kube_version))
proc = subprocess.Popen(
['ansible-playbook', '-e', 'kubernetes_version=%s' % kube_version,
constants.ANSIBLE_KUBE_STATIC_IMAGES_PLAYBOOK],
stdout=subprocess.PIPE)
out, _ = proc.communicate()
LOG.info("ansible-playbook: %s." % out)
if proc.returncode:
raise Exception("ansible-playbook returned an error: %s" % proc.returncode)
except Exception as e:
LOG.error("Failed to upgrade/downgrade kubernetes "
"static images: {}".format(e))
return False
return True
    def check_nodes_stable(self):
        hosts = self.dbapi.ihost_get_list()
        if (utils.is_host_simplex_controller(hosts[0]) and
@ -5449,7 +5621,8 @@ class ConductorManager(service.PeriodicService):
        :returns: list of namespaces
        """
        try:
-            cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
+            cmd = ['kubectl',
+                   '--kubeconfig={}'.format(kubernetes.KUBERNETES_ADMIN_CONF),
                   'get', 'namespaces', '-o',
                   'go-template=\'{{range .items}}{{.metadata.name}}\'{{end}}\'']
            stdout, stderr = cutils.execute(*cmd, run_as_root=False)
@ -5883,6 +6056,12 @@ class ConductorManager(service.PeriodicService):
        }
        self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def update_controller_upgrade_flag(self, context):
"""Update the controller upgrade flag"""
LOG.info("update_controller_upgrade_flag")
cutils.touch(tsc.CONTROLLER_UPGRADE_FLAG)
    def update_storage_config(self, context,
                              update_storage=False,
                              reinstall_required=False,
@ -5926,8 +6105,8 @@ class ConductorManager(service.PeriodicService):
                'platform::drbd::platform::runtime',
            constants.FILESYSTEM_NAME_EXTENSION:
                'platform::drbd::extension::runtime',
-            constants.FILESYSTEM_NAME_PATCH_VAULT:
-                'platform::drbd::patch_vault::runtime',
+            constants.FILESYSTEM_NAME_DC_VAULT:
+                'platform::drbd::dc_vault::runtime',
            constants.FILESYSTEM_NAME_ETCD:
                'platform::drbd::etcd::runtime',
        }
@ -5969,6 +6148,8 @@ class ConductorManager(service.PeriodicService):
                'platform::filesystem::docker::runtime',
            constants.FILESYSTEM_NAME_KUBELET:
                'platform::filesystem::kubelet::runtime',
+            constants.FILESYSTEM_NAME_IMAGE_CONVERSION:
+                'platform::filesystem::conversion::runtime',
        }
        puppet_class = [classmap.get(fs) for fs in filesystem_list]
@ -6337,6 +6518,31 @@ class ConductorManager(service.PeriodicService):
                  'task': None}
        self.dbapi.storage_ceph_external_update(sb_uuid, values)
def _update_image_conversion_alarm(self, alarm_state, fs_name, reason_text=None):
""" Raise conversion configuration alarm"""
entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_IMAGE_CONVERSION,
fs_name)
if alarm_state == fm_constants.FM_ALARM_STATE_SET:
fault = fm_api.Fault(
alarm_id=fm_constants.FM_ALARM_ID_IMAGE_CONVERSION,
alarm_state=alarm_state,
entity_type_id=fm_constants.FM_ENTITY_TYPE_IMAGE_CONVERSION,
entity_instance_id=entity_instance_id,
severity=fm_constants.FM_ALARM_SEVERITY_CRITICAL,
reason_text=reason_text,
alarm_type=fm_constants.FM_ALARM_TYPE_4,
probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_7,
                proposed_repair_action=_("Add image-conversion filesystem on both controllers. "
"Consult the System Administration Manual "
"for more details. If problem persists, "
"contact next level of support."),
service_affecting=True)
self.fm_api.set_fault(fault)
else:
self.fm_api.clear_fault(fm_constants.FM_ALARM_ID_IMAGE_CONVERSION,
entity_instance_id)
    def _update_storage_backend_alarm(self, alarm_state, backend, reason_text=None):
        """ Update storage backend configuration alarm"""
        entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_STORAGE_BACKEND,
@ -7219,10 +7425,6 @@ class ConductorManager(service.PeriodicService):
        # Defaults: 500G root disk
        #
-        # Min size of the cgts-vg PV is:
-        # 167.0 G - PV for cgts-vg (specified in the kickstart)
-        # or
-        # 163.0 G - (for DCSC non-AIO)
        #   8 G - /var/log (reserved in kickstart)
        #   8 G - /scratch (reserved in kickstart)
        #   2 G - pgsql_lv (DRBD bootstrap manifest)
@ -7245,32 +7447,18 @@
        #  16 G - /var/lib/docker-distribution (--kubernetes)
        #   5 G - /opt/etcd (--kubernetes)
        #  20 G - /var/lib/ceph/mon (--kubernetes)
-        #   8 G - /opt/patch-vault (DRBD ctlr manifest for
-        #         Distributed Cloud System Controller non-AIO only)
+        #  15 G - /opt/dc-vault (DRBD ctlr manifest for
+        #        Distributed Cloud System Controller)
        # -----
-        # 163 G (for DCSC non-AIO) or 167
+        # 160 G
        #
        # The absolute minimum disk size for these default settings:
        #   0.5 G - /boot
+        #  10.0 G - /opt/platform-backup
        #  20.0 G - /
-        # 167.0 G - cgts-vg PV
-        # or 163.0 G - (DCSC non-AIO)
+        # 160.0 G - cgts-vg PV
        # -------
-        # 183.5 G => ~184G min size disk
-        # or
-        # 187.5 G => ~188G min size disk
-        #
-        # If required disk is size 500G:
-        #   1) Standard controller - will use all free space for the PV
-        #       0.5 G - /boot
-        #      20.0 G - /
-        #     479.5 G - cgts-vg PV
-        #
-        #   2) AIO - will leave unused space for further partitioning
-        #       0.5 G - /boot
-        #      20.0 G - /
-        #     167.0 G - cgts-vg PV
-        #     312.5 G - unpartitioned free space
+        # 190.5 G min size disk
        #
        database_storage = constants.DEFAULT_DATABASE_STOR_SIZE
@ -7280,10 +7468,6 @@ class ConductorManager(service.PeriodicService):
        # Small disk: under 240G root disk
        #
-        # Min size of the cgts-vg PV is:
-        # 135.0 G - PV for cgts-vg (specified in the kickstart)
-        # or
-        # 133.0 G - (for DCSC non-AIO)
        #   8 G - /var/log (reserved in kickstart)
        #   8 G - /scratch (reserved in kickstart)
        #   2 G - pgsql_lv (DRBD bootstrap manifest)
@ -7306,31 +7490,17 @@
        #  16 G - /var/lib/docker-distribution (--kubernetes)
        #  20 G - /var/lib/ceph/mon (--kubernetes)
        #   5 G - /opt/etcd (--kubernetes)
-        #   8 G - /opt/patch-vault (DRBD ctlr manifest for DCSC non-AIO only)
+        #  15 G - /opt/dc-vault (DRBD ctlr manifest for DCSC)
        # -----
-        # 138 G (for DCSC non-AIO) or 140 G
+        # 145 G
        #
        # The absolute minimum disk size for these default settings:
        #   0.5 G - /boot
+        #  10.0 G - /opt/platform-backup
        #  20.0 G - /
-        # 140.0 G - cgts-vg PV
-        # or
-        # 138.0 G - (for DCSC non-AIO)
+        # 145.0 G - cgts-vg PV
        # -------
-        # 160.5 G => ~156G min size disk
-        # or
-        # 158.5 G => ~154G min size disk
-        #
-        # If required disk is size 240G:
-        #   1) Standard controller - will use all free space for the PV
-        #       0.5 G - /boot
-        #      20.0 G - /
-        #     219.5 G - cgts-vg PV
-        #   2) AIO - will leave unused space for further partitioning
-        #       0.5 G - /boot
-        #      20.0 G - /
-        #     151.0 G - cgts-vg PV
-        #      68.5 G - unpartitioned free space
        #
        database_storage = \
            constants.DEFAULT_SMALL_DATABASE_STOR_SIZE
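As a quick check on the two minimum-disk figures in the comments above; the sizes are taken straight from those comments, and this snippet is only a verification aid:

# minimum disk = /boot + /opt/platform-backup + / + cgts-vg PV
default_disk = 0.5 + 10.0 + 20.0 + 160.0   # 190.5 G for the 500G layout
small_disk = 0.5 + 10.0 + 20.0 + 145.0     # 175.5 G for the small-disk layout
assert (default_disk, small_disk) == (190.5, 175.5)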
@ -7399,10 +7569,10 @@ class ConductorManager(service.PeriodicService):
        if system_dc_role == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
            data = {
-                'name': constants.FILESYSTEM_NAME_PATCH_VAULT,
-                'size': constants.DEFAULT_PATCH_VAULT_STOR_SIZE,
+                'name': constants.FILESYSTEM_NAME_DC_VAULT,
+                'size': constants.DEFAULT_DC_VAULT_STOR_SIZE,
                'logical_volume': constants.FILESYSTEM_LV_DICT[
-                    constants.FILESYSTEM_NAME_PATCH_VAULT],
+                    constants.FILESYSTEM_NAME_DC_VAULT],
                'replicated': True,
            }
            LOG.info("Creating FS:%s:%s %d" % (
@ -7692,8 +7862,8 @@ class ConductorManager(service.PeriodicService):
                fs.append(constants.DRBD_PLATFORM)
            if "drbd-extension" in row and ("SyncSource" in row or "PausedSyncS" in row):
                fs.append(constants.DRBD_EXTENSION)
-            if "drbd-patch-vault" in row and ("SyncSource" in row or "PausedSyncS" in row):
-                fs.append(constants.DRBD_PATCH_VAULT)
+            if "drbd-dc-vault" in row and ("SyncSource" in row or "PausedSyncS" in row):
+                fs.append(constants.DRBD_DC_VAULT)
            if "drbd-etcd" in row and ("SyncSource" in row or "PausedSyncS" in row):
                fs.append(constants.DRBD_ETCD)
            if "drbd-dockerdistribution" in row and ("SyncSource" in row or "PausedSyncS" in row):
@ -7706,7 +7876,7 @@ class ConductorManager(service.PeriodicService):
        drbd_dict = [_f for _f in drbd_dict.split('\n') if _f]

        drbd_patch_size = 0
-        patch_lv_size = 0
+        dc_lv_size = 0
        dockerdistribution_size = 0
        dockerdistribution_lv_size = 0
        drbd_etcd_size = 0
@ -7734,7 +7904,7 @@ class ConductorManager(service.PeriodicService):
                    drbd_platform_size = size
                if 'drbd-extension' in row:
                    drbd_extension_size = size
-                if 'drbd-patch-vault' in row:
+                if 'drbd-dc-vault' in row:
                    drbd_patch_size = size
                if 'drbd-etcd' in row:
                    drbd_etcd_size = size
@ -7748,21 +7918,21 @@ class ConductorManager(service.PeriodicService):
            platform_lv_size = float(lvdisplay_dict['platform-lv'])
        if lvdisplay_dict.get('extension-lv', None):
            extension_lv_size = float(lvdisplay_dict['extension-lv'])
-        if lvdisplay_dict.get('patch-vault-lv', None):
-            patch_lv_size = float(lvdisplay_dict['patch-vault-lv'])
+        if lvdisplay_dict.get('dc-vault-lv', None):
+            dc_lv_size = float(lvdisplay_dict['dc-vault-lv'])
        if lvdisplay_dict.get('etcd-lv', None):
            etcd_lv_size = float(lvdisplay_dict['etcd-lv'])
        if lvdisplay_dict.get('dockerdistribution-lv', None):
            dockerdistribution_lv_size = float(lvdisplay_dict['dockerdistribution-lv'])

        LOG.info("drbd-overview: pgsql-%s, platform-%s, extension-%s,"
-                 " patch-vault-%s, etcd-%s, dockerdistribution-%s",
+                 " dc-vault-%s, etcd-%s, dockerdistribution-%s",
                 drbd_pgsql_size, drbd_platform_size, drbd_extension_size,
                 drbd_patch_size, drbd_etcd_size, dockerdistribution_size)
        LOG.info("lvdisplay: pgsql-%s, platform-%s, extension-%s,"
-                 " patch-vault-%s, etcd-%s, dockerdistribution-%s",
+                 " dc-vault-%s, etcd-%s, dockerdistribution-%s",
                 pgsql_lv_size, platform_lv_size, extension_lv_size,
-                 patch_lv_size, etcd_lv_size, dockerdistribution_lv_size)
+                 dc_lv_size, etcd_lv_size, dockerdistribution_lv_size)
        drbd_fs_updated = []
        if math.ceil(drbd_pgsql_size) < math.ceil(pgsql_lv_size):
@ -7771,8 +7941,8 @@ class ConductorManager(service.PeriodicService):
            drbd_fs_updated.append(constants.DRBD_PLATFORM)
        if math.ceil(drbd_extension_size) < math.ceil(extension_lv_size):
            drbd_fs_updated.append(constants.DRBD_EXTENSION)
-        if math.ceil(drbd_patch_size) < math.ceil(patch_lv_size):
-            drbd_fs_updated.append(constants.DRBD_PATCH_VAULT)
+        if math.ceil(drbd_patch_size) < math.ceil(dc_lv_size):
+            drbd_fs_updated.append(constants.DRBD_DC_VAULT)
        if math.ceil(drbd_etcd_size) < math.ceil(etcd_lv_size):
            drbd_fs_updated.append(constants.DRBD_ETCD)
        if math.ceil(dockerdistribution_size) < math.ceil(dockerdistribution_lv_size):
@ -7848,11 +8018,11 @@ class ConductorManager(service.PeriodicService):
LOG.info("Performed %s" % progress) LOG.info("Performed %s" % progress)
extension_resized = True extension_resized = True
if constants.DRBD_PATCH_VAULT in drbd_fs_updated: if constants.DRBD_DC_VAULT in drbd_fs_updated:
if (not patch_resized and if (not patch_resized and
(not standby_host or (standby_host and (not standby_host or (standby_host and
constants.DRBD_PATCH_VAULT in self._drbd_fs_sync()))): constants.DRBD_DC_VAULT in self._drbd_fs_sync()))):
# patch_gib /opt/patch-vault # patch_gib /opt/dc-vault
progress = "resize2fs drbd6" progress = "resize2fs drbd6"
cmd = ["resize2fs", "/dev/drbd6"] cmd = ["resize2fs", "/dev/drbd6"]
stdout, __ = cutils.execute(*cmd, attempts=retry_attempts, run_as_root=True) stdout, __ = cutils.execute(*cmd, attempts=retry_attempts, run_as_root=True)
@ -7893,7 +8063,7 @@ class ConductorManager(service.PeriodicService):
                    all_resized = False
                elif drbd == constants.DRBD_EXTENSION and not extension_resized:
                    all_resized = False
-                elif drbd == constants.DRBD_PATCH_VAULT and not patch_resized:
+                elif drbd == constants.DRBD_DC_VAULT and not patch_resized:
                    all_resized = False
                elif drbd == constants.DRBD_ETCD and not etcd_resized:
                    all_resized = False
@ -8797,7 +8967,8 @@ class ConductorManager(service.PeriodicService):
        for upgrade_element in upgrade_paths:
            valid_from_version = upgrade_element.findtext('version')
-            if valid_from_version == current_version:
+            valid_from_versions = valid_from_version.split(",")
+            if current_version in valid_from_versions:
                path_found = True
                upgrade_path = upgrade_element
                break
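The change above lets a metadata <version> element carry a comma-separated list of valid starting loads instead of a single one. A tiny illustration; the version strings here are made up:

valid_from_version = "19.12,20.04"           # as read from the metadata XML
valid_from_versions = valid_from_version.split(",")
assert "20.04" in valid_from_versions        # a 20.04 load matches this path
assert "20.06" not in valid_from_versions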
@ -8934,9 +9105,9 @@ class ConductorManager(service.PeriodicService):
"Failure during sw-patch del-release")) "Failure during sw-patch del-release"))
# delete the central patch vault if it exists # delete the central patch vault if it exists
patch_vault = '/opt/patch-vault/' + load.software_version dc_vault = '/opt/dc-vault/' + load.software_version
if os.path.exists(patch_vault): if os.path.exists(dc_vault):
shutil.rmtree(patch_vault) shutil.rmtree(dc_vault)
        cleanup_script = constants.DELETE_LOAD_SCRIPT
        if os.path.isfile(cleanup_script):
@ -9161,31 +9332,6 @@ class ConductorManager(service.PeriodicService):
        controller_0 = self.dbapi.ihost_get_by_hostname(
            constants.CONTROLLER_0_HOSTNAME)
-        # TODO: This code is only useful for supporting R5 to R6 upgrades.
-        # Remove in future release.
-        # update crushmap and remove cache-tier on upgrade
-        if from_version == tsc.SW_VERSION_1803:
-            ceph_backend = StorageBackendConfig.get_backend(self.dbapi, constants.CINDER_BACKEND_CEPH)
-            if ceph_backend and ceph_backend.state == constants.SB_STATE_CONFIGURED:
-                try:
-                    response, body = self._ceph_api.osd_crush_rule_rm("cache_tier_ruleset",
-                                                                      body='json')
-                    if response.ok:
-                        LOG.info("Successfully removed cache_tier_ruleset "
-                                 "[ceph osd crush rule rm cache_tier_ruleset]")
-                        try:
-                            response, body = self._ceph_api.osd_crush_remove("cache-tier",
-                                                                             body='json')
-                            if response.ok:
-                                LOG.info("Successfully removed cache_tier "
-                                         "[ceph osd crush remove cache-tier]")
-                        except exception.CephFailure:
-                            LOG.warn("Failed to remove bucket cache-tier from crushmap")
-                            pass
-                except exception.CephFailure:
-                    LOG.warn("Failed to remove rule cache-tier from crushmap")
-                    pass
        if state in [constants.UPGRADE_ABORTING,
                constants.UPGRADE_ABORTING_ROLLBACK]:
            if upgrade.state != constants.UPGRADE_ABORT_COMPLETING:
@ -9433,7 +9579,7 @@ class ConductorManager(service.PeriodicService):
""" """
Checks if the host is running the same load as the active controller Checks if the host is running the same load as the active controller
:param host: a host object :param host: a host object
:return: true if host target load matches active sw_version :return: True if host target load matches active sw_version
""" """
host_upgrade = self.dbapi.host_upgrade_get_by_host(host.id) host_upgrade = self.dbapi.host_upgrade_get_by_host(host.id)
target_load = self.dbapi.load_get(host_upgrade.target_load) target_load = self.dbapi.load_get(host_upgrade.target_load)
@ -9486,7 +9632,7 @@ class ConductorManager(service.PeriodicService):
                            '/dev/cgts-vg/dockerdistribution-lv '
        if system_dc_role == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
-            lvdisplay_command = lvdisplay_command + '/dev/cgts-vg/patch-vault-lv '
+            lvdisplay_command = lvdisplay_command + '/dev/cgts-vg/dc-vault-lv '

        lvdisplay_dict = {}
        # Execute the command.
@ -11343,3 +11489,37 @@ class ConductorManager(service.PeriodicService):
        kube_upgrade_obj = objects.kube_upgrade.get_one(context)
        kube_upgrade_obj.state = kubernetes.KUBE_UPGRADED_NETWORKING
        kube_upgrade_obj.save()
def store_bitstream_file(self, context, filename):
"""Store FPGA bitstream file """
image_file_path = os.path.join(dconstants.DEVICE_IMAGE_PATH, filename)
image_tmp_path = os.path.join(dconstants.DEVICE_IMAGE_TMP_PATH, filename)
try:
os.makedirs(dconstants.DEVICE_IMAGE_PATH)
except OSError as oe:
if (oe.errno != errno.EEXIST or
not os.path.isdir(dconstants.DEVICE_IMAGE_PATH)):
LOG.error("Failed to create dir %s" % dconstants.DEVICE_IMAGE_PATH)
raise
shutil.copyfile(image_tmp_path, image_file_path)
LOG.info("copied %s to %s" % (image_tmp_path, image_file_path))
def delete_bitstream_file(self, context, filename):
"""Delete FPGA bitstream file"""
image_file_path = os.path.join(dconstants.DEVICE_IMAGE_PATH, filename)
try:
os.remove(image_file_path)
except OSError:
LOG.exception("Failed to delete bitstream file %s" % image_file_path)
def host_device_image_update(self, context, host_uuid):
"""Update the device image on this host"""
host_obj = objects.host.get_by_uuid(context, host_uuid)
LOG.info("Updating device image on %s" % host_obj.hostname)
def host_device_image_update_abort(self, context, host_uuid):
"""Abort device image update on this host"""
host_obj = objects.host.get_by_uuid(context, host_uuid)
LOG.info("Aborting device image update on %s" % host_obj.hostname)

View File

@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-# Copyright (c) 2013-2019 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
#
""" """
@ -787,6 +787,14 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
""" """
return self.call(context, self.make_msg('update_user_config')) return self.call(context, self.make_msg('update_user_config'))
def update_controller_upgrade_flag(self, context):
"""Synchronously, have a conductor update controller upgrade flag
:param context: request context
"""
return self.call(context,
self.make_msg('update_controller_upgrade_flag'))
    def update_storage_config(self, context, update_storage=False,
                              reinstall_required=False, reboot_required=True,
                              filesystem_list=None):
@ -1883,3 +1891,43 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
""" """
return self.cast(context, self.make_msg('kube_upgrade_networking', return self.cast(context, self.make_msg('kube_upgrade_networking',
kube_version=kube_version)) kube_version=kube_version))
def store_bitstream_file(self, context, filename):
"""Asynchronously, have the conductor store the device image
on this host.
:param context: request context
:param filename: name of the bitstream file
"""
return self.cast(context, self.make_msg('store_bitstream_file',
filename=filename))
def delete_bitstream_file(self, context, filename):
"""Asynchronously, have the conductor remove the device image
on this host.
:param context: request context
:param filename: name of the bitstream file
"""
return self.cast(context, self.make_msg('delete_bitstream_file',
filename=filename))
def host_device_image_update(self, context, host_uuid):
"""Asynchronously, have the conductor update the device image
on this host.
:param context: request context
:param host_uuid: uuid or id of the host
"""
return self.cast(context, self.make_msg('host_device_image_update',
host_uuid=host_uuid))
def host_device_image_update_abort(self, context, host_uuid):
"""Asynchronously, have the conductor abort the device image update
on this host.
:param context: request context
:param host_uuid: uuid or id of the host
"""
return self.cast(context, self.make_msg('host_device_image_update_abort',
host_uuid=host_uuid))

View File

@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-# Copyright (c) 2013-2019 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
#

"""SQLAlchemy storage backend."""
@ -45,6 +45,7 @@ from oslo_utils import uuidutils
from sysinv._i18n import _
from sysinv import objects
from sysinv.common import constants
+from sysinv.common import device as dconstants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.db import api
@ -1149,6 +1150,26 @@ def add_host_fs_filter_by_ihost(query, value):
    return query.filter(models.ihost.uuid == value)
def add_deviceimage_filter(query, value):
"""Adds a deviceimage-specific filter to a query.
:param query: Initial query to add filter to.
:param value: Value for filtering results by.
:return: Modified query.
"""
if uuidutils.is_uuid_like(value):
return query.filter(or_(models.DeviceImageRootKey.uuid == value,
models.DeviceImageFunctional.uuid == value,
models.DeviceImageKeyRevocation.uuid == value))
elif utils.is_int_like(value):
return query.filter(or_(models.DeviceImageRootKey.id == value,
models.DeviceImageFunctional.id == value,
models.DeviceImageKeyRevocation.id == value))
else:
return add_identity_filter(query, value, use_name=True)
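The helper dispatches on the shape of the identifier: UUID, integer id, or (falling through) a name. A self-contained sketch of the same dispatch; uuidutils is the oslo helper used in this file, while classify_identifier and the isdigit check are illustrative stand-ins:

from oslo_utils import uuidutils

def classify_identifier(value):
    # Mirror add_deviceimage_filter's branch order: uuid, then int, then name.
    if uuidutils.is_uuid_like(value):
        return 'uuid'
    if str(value).isdigit():
        return 'id'
    return 'name'

assert classify_identifier('8b8f0f51-96d2-45e5-b217-9131b56fbe8e') == 'uuid'
assert classify_identifier(42) == 'id'
assert classify_identifier('my-image') == 'name'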
class Connection(api.Connection):
    """SqlAlchemy connection."""
@ -1793,6 +1814,87 @@ class Connection(api.Connection):
            filter_by(id=memory_id).\
            delete()
@objects.objectify(objects.fpga_device)
def fpga_device_create(self, hostid, values):
if utils.is_int_like(hostid):
host = self.ihost_get(int(hostid))
elif utils.is_uuid_like(hostid):
host = self.ihost_get(hostid.strip())
elif isinstance(hostid, models.ihost):
host = hostid
else:
raise exception.NodeNotFound(node=hostid)
values['host_id'] = host['id']
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
fpga_device = models.FpgaDevice()
fpga_device.update(values)
with _session_for_write() as session:
try:
session.add(fpga_device)
session.flush()
except db_exc.DBDuplicateEntry:
LOG.error("Failed to add FPGA device (uuid: %s), FPGA device with PCI "
"address %s on host %s already exists" %
(values['uuid'],
values['pciaddr'],
values['host_id']))
raise exception.PCIAddrAlreadyExists(pciaddr=values['pciaddr'],
host=values['host_id'])
return self._fpga_device_get(values['pciaddr'], values['host_id'])
def _fpga_device_get(self, pciaddr, hostid=None):
query = model_query(models.FpgaDevice)
if hostid:
query = query.filter_by(host_id=hostid)
query = add_identity_filter(query, pciaddr, use_pciaddr=True)
try:
result = query.one()
except NoResultFound:
raise exception.PCIAddrNotFound(pciaddr=pciaddr)
return result
@objects.objectify(objects.fpga_device)
def fpga_device_get(self, deviceid, hostid=None):
return self._fpga_device_get(deviceid, hostid)
@objects.objectify(objects.fpga_device)
def fpga_device_update(self, device_id, values, forihostid=None):
with _session_for_write() as session:
# May need to reserve in multi controller system; ref sysinv
query = model_query(models.FpgaDevice, read_deleted="no",
session=session)
if forihostid:
query = query.filter_by(host_id=forihostid)
try:
query = add_identity_filter(query, device_id)
result = query.one()
for k, v in values.items():
setattr(result, k, v)
except NoResultFound:
raise exception.InvalidParameterValue(
err="No entry found for device %s" % device_id)
except MultipleResultsFound:
raise exception.InvalidParameterValue(
err="Multiple entries found for device %s" % device_id)
return query.one()
@objects.objectify(objects.fpga_device)
def fpga_device_get_by_host(self, host, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.FpgaDevice)
query = add_device_filter_by_host(query, host)
return _paginate_query(models.FpgaDevice, limit, marker,
sort_key, sort_dir, query)
    @objects.objectify(objects.pci_device)
    def pci_device_create(self, hostid, values):
@ -8259,3 +8361,423 @@ class Connection(api.Connection):
        except NoResultFound:
            raise exception.KubeUpgradeNotFound(upgrade_id=upgrade_id)
        query.delete()
def _deviceimage_get(self, model_class, deviceimage_id, obj=None):
session = None
if obj:
session = inspect(obj).session
query = model_query(model_class, session=session)
query = add_deviceimage_filter(query, deviceimage_id)
try:
result = query.one()
except NoResultFound:
raise exception.DeviceImageNotFound(
deviceimage_uuid=deviceimage_id)
except MultipleResultsFound:
raise exception.InvalidParameterValue(
err="Multiple entries found for deviceimage %s" % deviceimage_id)
return result
def _deviceimage_get_one(self, deviceimage_id, deviceimage=None):
entity = with_polymorphic(models.DeviceImage, '*')
query = model_query(entity)
query = add_deviceimage_filter(query, deviceimage_id)
if deviceimage is not None:
query = query.filter_by(network_type=deviceimage)
try:
result = query.one()
except NoResultFound:
raise exception.DeviceImageNotFound(
deviceimage_uuid=deviceimage_id)
except MultipleResultsFound:
raise exception.InvalidParameterValue(
err="Multiple entries found for deviceimage %s" % deviceimage_id)
return result
def _deviceimage_create(self, obj, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
with _session_for_write() as session:
# The id is null for ae interfaces with more than one member interface
temp_id = obj.id
obj.update(values)
if obj.id is None:
obj.id = temp_id
try:
session.add(obj)
session.flush()
except db_exc.DBDuplicateEntry:
LOG.error("Failed to add deviceimage (uuid: %s), "
"name %s already exists." %
(values['uuid'], values.get('name')))
raise exception.DeviceImageAlreadyExists(
name=values.get('name'))
return self._deviceimage_get(type(obj), values['uuid'])
@objects.objectify(objects.device_image)
def deviceimage_create(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
bitstream_type = values.get('bitstream_type')
if bitstream_type == dconstants.BITSTREAM_TYPE_ROOT_KEY:
deviceimage = models.DeviceImageRootKey()
elif bitstream_type == dconstants.BITSTREAM_TYPE_FUNCTIONAL:
deviceimage = models.DeviceImageFunctional()
elif bitstream_type == dconstants.BITSTREAM_TYPE_KEY_REVOCATION:
deviceimage = models.DeviceImageKeyRevocation()
else:
raise exception.DeviceImageTypeUnsupported(
bitstream_type=bitstream_type)
return self._deviceimage_create(deviceimage, values)
@objects.objectify(objects.device_image)
def deviceimage_get(self, deviceimage_id):
return self._deviceimage_get_one(deviceimage_id)
def _add_deviceimage_filters(self, query, filters):
if filters is None:
filters = dict()
supported_filters = {'bitstream_type',
'name',
}
unsupported_filters = set(filters).difference(supported_filters)
if unsupported_filters:
msg = _("SqlAlchemy API does not support "
"filtering by %s") % ', '.join(unsupported_filters)
raise ValueError(msg)
for field in supported_filters:
if field in filters:
query = query.filter_by(**{field: filters[field]})
return query
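The unsupported-filter check above can be exercised on plain dicts without a database. A minimal sketch; the supported-key set comes from this diff, while the function name is illustrative:

SUPPORTED_FILTERS = frozenset({'bitstream_type', 'name'})

def check_filters(filters):
    # Mirror _add_deviceimage_filters' validation step on a plain dict.
    filters = filters or {}
    unsupported = set(filters) - SUPPORTED_FILTERS
    if unsupported:
        raise ValueError("filtering by %s is not supported"
                         % ', '.join(sorted(unsupported)))
    return filters

assert check_filters({'name': 'img1'}) == {'name': 'img1'}
assert check_filters(None) == {}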
@objects.objectify(objects.device_image)
def deviceimages_get_all(self, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
with _session_for_read() as session:
deviceimages = with_polymorphic(models.DeviceImage, '*')
query = model_query(deviceimages, session=session)
query = self._add_deviceimage_filters(query, filters)
return _paginate_query(models.DeviceImage, limit, marker,
sort_key, sort_dir, query)
@objects.objectify(objects.device_image)
def deviceimage_update(self, deviceimage_uuid, values):
with _session_for_write() as session:
query = model_query(models.DeviceImage, session=session)
query = add_identity_filter(query, deviceimage_uuid)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exception.DeviceImageNotFound(
deviceimage_uuid=deviceimage_uuid)
return query.one()
def deviceimage_destroy(self, deviceimage_uuid):
query = model_query(models.DeviceImage)
query = add_identity_filter(query, deviceimage_uuid)
try:
query.one()
except NoResultFound:
raise exception.DeviceImageNotFound(
deviceimage_uuid=deviceimage_uuid)
query.delete()
def _device_label_get(self, device_label_id):
query = model_query(models.DeviceLabel)
query = add_identity_filter(query, device_label_id)
try:
result = query.one()
except NoResultFound:
raise exception.DeviceLabelNotFound(uuid=device_label_id)
return result
@objects.objectify(objects.device_label)
def device_label_create(self, device_uuid, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
values['device_uuid'] = device_uuid
host_device_label = models.DeviceLabel()
host_device_label.update(values)
with _session_for_write() as session:
try:
session.add(host_device_label)
session.flush()
except db_exc.DBDuplicateEntry:
LOG.error("Failed to add host device label %s. "
"Already exists with this uuid" %
(values['label_key']))
raise exception.DeviceLabelAlreadyExists(
label=values['label_key'], host=values['host_uuid'])
return self._device_label_get(values['uuid'])
@objects.objectify(objects.device_label)
def device_label_get(self, uuid):
query = model_query(models.DeviceLabel)
query = query.filter_by(uuid=uuid)
try:
result = query.one()
except NoResultFound:
raise exception.InvalidParameterValue(
err="No device label entry found for %s" % uuid)
return result
@objects.objectify(objects.device_label)
def device_label_get_all(self, deviceid=None):
query = model_query(models.DeviceLabel, read_deleted="no")
if deviceid:
query = query.filter_by(device_id=deviceid)
return query.all()
@objects.objectify(objects.device_label)
def device_label_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
return _paginate_query(models.DeviceLabel, limit, marker,
sort_key, sort_dir)
@objects.objectify(objects.device_label)
def device_label_get_by_label(self, label_key, label_value,
limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.DeviceLabel)
query = query.filter_by(label_key=label_key,
label_value=label_value)
return query.all()
@objects.objectify(objects.device_label)
def device_label_update(self, uuid, values):
with _session_for_write() as session:
query = model_query(models.DeviceLabel, session=session)
query = query.filter_by(uuid=uuid)
count = query.update(values, synchronize_session='fetch')
if count == 0:
                raise exception.DeviceLabelNotFound(uuid=uuid)
return query.one()
def device_label_destroy(self, uuid):
with _session_for_write() as session:
query = model_query(models.DeviceLabel, session=session)
query = query.filter_by(uuid=uuid)
try:
query.one()
except NoResultFound:
                raise exception.DeviceLabelNotFound(uuid=uuid)
query.delete()
@objects.objectify(objects.device_label)
def device_label_get_by_device(self, device_uuid,
limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.DeviceLabel)
query = query.filter_by(pcidevice_uuid=device_uuid)
return _paginate_query(models.DeviceLabel, limit, marker,
sort_key, sort_dir, query)
def _device_label_query(self, device_id, label_key, session=None):
query = model_query(models.DeviceLabel, session=session)
query = query.filter(models.DeviceLabel.pcidevice_id == device_id)
query = query.filter(models.DeviceLabel.label_key == label_key)
try:
result = query.one()
except NoResultFound:
raise exception.DeviceLabelNotFoundByKey(label=label_key)
return result
@objects.objectify(objects.device_label)
def device_label_query(self, device_id, label_key):
return self._device_label_query(device_id, label_key)
def count_hosts_by_device_label(self, device_label):
query = model_query(models.DeviceLabel, read_deleted="no")
query = query.filter(models.DeviceLabel.label_key == device_label)
return query.count()
def _device_image_label_get(self, device_image_label_id):
query = model_query(models.DeviceImageLabel)
query = add_identity_filter(query, device_image_label_id)
try:
result = query.one()
except NoResultFound:
raise exception.DeviceLabelNotFound(uuid=device_image_label_id)
return result
@objects.objectify(objects.device_image_label)
def device_image_label_create(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
device_image_label = models.DeviceImageLabel()
device_image_label.update(values)
with _session_for_write() as session:
try:
session.add(device_image_label)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.DeviceImageLabelAlreadyExists(
uuid=values['uuid'])
return self._device_image_label_get(values['uuid'])
@objects.objectify(objects.device_image_label)
def device_image_label_get(self, uuid):
query = model_query(models.DeviceImageLabel)
query = query.filter_by(uuid=uuid)
try:
result = query.one()
except NoResultFound:
raise exception.InvalidParameterValue(
err="No device image label entry found for %s" % uuid)
return result
@objects.objectify(objects.device_image_label)
def device_image_label_update(self, uuid, values):
with _session_for_write() as session:
query = model_query(models.DeviceImageLabel, session=session)
query = query.filter_by(uuid=uuid)
count = query.update(values, synchronize_session='fetch')
if count == 0:
                raise exception.DeviceImageLabelNotFound(uuid=uuid)
return query.one()
@objects.objectify(objects.device_image_label)
def device_image_label_get_by_image(self, image_id,
limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.DeviceImageLabel)
query = query.filter_by(image_id=image_id)
return query.all()
@objects.objectify(objects.device_image_label)
def device_image_label_get_by_image_label(self, image_id, label_id,
limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.DeviceImageLabel)
query = query.filter_by(image_id=image_id, label_id=label_id)
try:
return query.one()
except NoResultFound:
raise exception.DeviceImageLabelNotFoundByKey(
image_id=image_id, label_id=label_id)
def device_image_label_destroy(self, id):
with _session_for_write() as session:
query = model_query(models.DeviceImageLabel, session=session)
query = add_identity_filter(query, id)
try:
query.one()
except NoResultFound:
raise exception.DeviceImageLabelNotFound(uuid=id)
query.delete()
def _device_image_state_get(self, id):
query = model_query(models.DeviceImageState)
query = add_identity_filter(query, id)
try:
return query.one()
except NoResultFound:
raise exception.DeviceImageStateNotFound(id=id)
@objects.objectify(objects.device_image_state)
def device_image_state_create(self, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()
device_image_state = models.DeviceImageState()
device_image_state.update(values)
with _session_for_write() as session:
try:
session.add(device_image_state)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.DeviceImageStateAlreadyExists(uuid=values['uuid'])
return self._device_image_state_get(values['uuid'])
@objects.objectify(objects.device_image_state)
def device_image_state_get(self, id):
return self._device_image_state_get(id)
@objects.objectify(objects.device_image_state)
def device_image_state_get_one(self):
query = model_query(models.DeviceImageState)
try:
return query.one()
except NoResultFound:
raise exception.NotFound()
@objects.objectify(objects.device_image_state)
def device_image_state_get_list(self, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.DeviceImageState)
return _paginate_query(models.DeviceImageState, limit, marker,
sort_key, sort_dir, query)
@objects.objectify(objects.device_image_state)
def device_image_state_update(self, id, values):
with _session_for_write() as session:
query = model_query(models.DeviceImageState, session=session)
query = add_identity_filter(query, id)
count = query.update(values, synchronize_session='fetch')
if count != 1:
raise exception.DeviceImageStateNotFound(id=id)
return query.one()
def device_image_state_destroy(self, id):
with _session_for_write() as session:
query = model_query(models.DeviceImageState, session=session)
query = add_identity_filter(query, id)
try:
query.one()
except NoResultFound:
raise exception.DeviceImageStateNotFound(id=id)
query.delete()
@objects.objectify(objects.device_image_state)
def device_image_state_get_by_image_device(self, image_id, pcidevice_id,
limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.DeviceImageState)
query = query.filter_by(image_id=image_id,
pcidevice_id=pcidevice_id)
try:
return query.one()
except NoResultFound:
raise exception.DeviceImageStateNotFoundByKey(image_id=image_id,
device_id=pcidevice_id)
@objects.objectify(objects.device_image_state)
def device_image_state_get_all(self, host_id=None, pcidevice_id=None,
image_id=None, status=None,
limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.DeviceImageState)
if host_id:
query = query.filter_by(host_id=host_id)
if pcidevice_id:
query = query.filter_by(pcidevice_id=pcidevice_id)
if image_id:
query = query.filter_by(image_id=image_id)
if status:
query = query.filter_by(status=status)
return query.all()
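The keyword filters above compose, so a caller can ask narrow questions; a sketch, with an assumed dbapi handle and an illustrative status string:

    in_progress = dbapi.device_image_state_get_all(
        host_id=host_id, status='in-progress')  # status value illustrative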


@ -0,0 +1,14 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass


@ -0,0 +1,14 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass


@ -0,0 +1,14 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass


@ -0,0 +1,14 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass


@ -0,0 +1,14 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
def upgrade(migrate_engine):
pass
def downgrade(migration_engine):
pass


@ -0,0 +1,78 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import String, Integer, DateTime, Boolean
from sqlalchemy import ForeignKey, UniqueConstraint
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
Table('i_host', meta, autoload=True)
pci_devices = Table('pci_devices', meta, autoload=True)
fpga_devices = Table(
'fpga_devices',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('host_id', Integer, ForeignKey('i_host.id',
ondelete='CASCADE')),
Column('pci_id', Integer, ForeignKey('pci_devices.id',
ondelete='CASCADE')),
Column('pciaddr', String(32)),
Column('bmc_build_version', String(32)),
Column('bmc_fw_version', String(32)),
Column('root_key', String(128)),
Column('revoked_key_ids', String(512)),
Column('boot_page', String(16)),
Column('bitstream_id', String(32)),
UniqueConstraint('pciaddr', 'host_id', name='u_pciaddrhost'),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
fpga_devices.create()
Table('ports', meta, autoload=True)
fpga_ports = Table(
'fpga_ports',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('port_id', Integer, ForeignKey('ports.id', ondelete='CASCADE')),
Column('fpga_id', Integer, ForeignKey('fpga_devices.id', ondelete='CASCADE')),
UniqueConstraint('port_id', 'fpga_id', name='u_port_id@fpga_id'),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
fpga_ports.create()
# Add new fields to pci_device table
pci_devices.create_column(Column('status', String(128)))
pci_devices.create_column(Column('needs_firmware_update', Boolean, default=False))
def downgrade(migrate_engine):
# Downgrade is unsupported in this release.
raise NotImplementedError('SysInv database downgrade is unsupported.')
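The two columns added here back the new status and needs_firmware_update fields on the PciDevice model later in this diff. A sketch of querying the upgraded schema directly, in the same legacy SQLAlchemy idiom the migration itself uses (the engine URL is a placeholder):

    from sqlalchemy import MetaData, Table, create_engine, select

    engine = create_engine('postgresql:///sysinv')  # placeholder URL
    meta = MetaData(bind=engine)
    pci_devices = Table('pci_devices', meta, autoload=True)
    # Devices flagged as needing an FPGA firmware update.
    rows = engine.execute(
        select([pci_devices.c.pciaddr]).where(
            pci_devices.c.needs_firmware_update == True)).fetchall()  # noqa: E712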


@ -0,0 +1,201 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import DateTime, String, Integer, Boolean, Text
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import ForeignKey, UniqueConstraint
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
"""
    This database upgrade creates the device_images, device_labels and
    device_image_state tables.
"""
meta = MetaData()
meta.bind = migrate_engine
device_images = Table(
'device_images',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('bitstream_type', String(255)),
        # The pci_vendor and pci_device fields cannot reference the
        # pci_devices table: device images intended for a specific
        # vendor/device on a subcloud may not be present in the
        # SystemController region.
Column('pci_vendor', String(4)),
Column('pci_device', String(4)),
Column('name', String(255)),
Column('description', String(255)),
Column('image_version', String(255)),
Column('applied', Boolean, nullable=False, default=False),
Column('capabilities', Text),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
device_images_rootkey = Table(
'device_images_rootkey',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer,
ForeignKey('device_images.id', ondelete="CASCADE"),
primary_key=True, nullable=False),
Column('key_signature', String(255), nullable=False),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
device_images_functional = Table(
'device_images_functional',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer,
ForeignKey('device_images.id', ondelete="CASCADE"),
primary_key=True, nullable=False),
Column('bitstream_id', String(255), nullable=False),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
device_images_keyrevocation = Table(
'device_images_keyrevocation',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer,
ForeignKey('device_images.id', ondelete="CASCADE"),
primary_key=True, nullable=False),
Column('revoke_key_id', Integer, nullable=False),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
host = Table('i_host', meta, autoload=True)
Table('pci_devices', meta, autoload=True)
Table('fpga_devices', meta, autoload=True)
device_labels = Table(
'device_labels',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('host_id', Integer,
ForeignKey('i_host.id', ondelete='CASCADE')),
Column('pcidevice_id', Integer,
ForeignKey('pci_devices.id', ondelete='CASCADE')),
Column('fpgadevice_id', Integer,
ForeignKey('fpga_devices.id', ondelete='CASCADE')),
Column('label_key', String(384)),
Column('label_value', String(128)),
Column('capabilities', Text),
UniqueConstraint('pcidevice_id', 'label_key',
name='u_pcidevice_id@label_key'),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
device_image_labels = Table(
'device_image_labels',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('image_id', Integer,
ForeignKey('device_images.id', ondelete='CASCADE')),
Column('label_id', Integer,
ForeignKey('device_labels.id', ondelete='CASCADE')),
Column('status', String(128)),
Column('capabilities', Text),
UniqueConstraint('image_id', 'label_id', name='u_image_id@label_id'),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
device_image_state = Table(
'device_image_state',
meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), unique=True),
Column('host_id', Integer,
ForeignKey('i_host.id', ondelete='CASCADE')),
Column('pcidevice_id', Integer,
ForeignKey('pci_devices.id', ondelete='CASCADE')),
Column('image_id', Integer,
ForeignKey('device_images.id', ondelete='CASCADE')),
Column('status', String(128)),
Column('update_start_time', DateTime),
Column('capabilities', Text),
mysql_engine=ENGINE,
mysql_charset=CHARSET,
)
tables = (
device_images,
device_images_rootkey,
device_images_functional,
device_images_keyrevocation,
device_labels,
device_image_labels,
device_image_state,
)
for index, table in enumerate(tables):
try:
table.create()
except Exception:
# If an error occurs, drop all tables created so far to return
# to the previously existing state.
meta.drop_all(tables=tables[:index])
raise
# Add the device_image_update attribute
host.create_column(Column('device_image_update', String(64)))
host.create_column(Column('reboot_needed', Boolean, default=False))
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Downgrade is unsupported.
raise NotImplementedError('SysInv database downgrade is unsupported.')


@ -15,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2019 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@ -234,6 +234,9 @@ class ihost(Base):
     ttys_dcd = Column(Boolean)
     iscsi_initiator_name = Column(String(64))
+    device_image_update = Column(String(64))
+    reboot_needed = Column(Boolean, nullable=False, default=False)
     forisystemid = Column(Integer,
                           ForeignKey('i_system.id', ondelete='CASCADE'))
     peer_id = Column(Integer,
@ -1459,11 +1462,170 @@ class PciDevice(Base):
    enabled = Column(Boolean)
    extra_info = Column(Text)
    status = Column(String(128))
    needs_firmware_update = Column(Boolean, nullable=False, default=False)
    host = relationship("ihost", lazy="joined", join_depth=1)
    fpga = relationship("FpgaDevice", lazy="joined", uselist=False, join_depth=1)
    UniqueConstraint('pciaddr', 'host_id', name='u_pciaddrhost')
class FpgaDevice(Base):
__tablename__ = 'fpga_devices'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36))
host_id = Column(Integer, ForeignKey('i_host.id', ondelete='CASCADE'))
pci_id = Column(Integer, ForeignKey('pci_devices.id', ondelete='CASCADE'))
pciaddr = Column(String(32))
bmc_build_version = Column(String(32))
bmc_fw_version = Column(String(32))
root_key = Column(String(128))
revoked_key_ids = Column(String(512))
boot_page = Column(String(16))
bitstream_id = Column(String(32))
host = relationship("ihost", lazy="joined", join_depth=1)
pcidevice = relationship("PciDevice", lazy="joined", join_depth=1)
UniqueConstraint('pciaddr', 'host_id', name='u_pciaddrhost')
class FpgaPorts(Base):
__tablename__ = 'fpga_ports'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), unique=True)
port_id = Column(Integer, ForeignKey('ports.id', ondelete='CASCADE'))
fpga_id = Column(Integer,
ForeignKey('fpga_devices.id', ondelete='CASCADE'))
ports = relationship("Ports", lazy="joined", join_depth=1)
fpga_device = relationship("FpgaDevice", lazy="joined",
backref="fpga_ports", join_depth=1)
UniqueConstraint('port_id', 'fpga_id', name='u_port_id@fpga_id')
class DeviceImage(Base):
__tablename__ = 'device_images'
id = Column(Integer, primary_key=True)
uuid = Column(String(36))
bitstream_type = Column(String(255))
pci_vendor = Column(String(4))
pci_device = Column(String(4))
name = Column(String(255))
description = Column(String(255))
image_version = Column(String(255))
applied = Column(Boolean, nullable=False, default=False)
capabilities = Column(JSONEncodedDict)
__mapper_args__ = {
'polymorphic_identity': 'deviceimage',
'polymorphic_on': bitstream_type,
'with_polymorphic': '*',
}
class DeviceImageCommon(object):
@declared_attr
def id(cls):
return Column(Integer,
ForeignKey('device_images.id', ondelete="CASCADE"),
primary_key=True, nullable=False)
class DeviceImageRootKey(DeviceImageCommon, DeviceImage):
__tablename__ = 'device_images_rootkey'
key_signature = Column(String(255), nullable=True)
__mapper_args__ = {
'polymorphic_identity': 'root-key',
}
class DeviceImageFunctional(DeviceImageCommon, DeviceImage):
__tablename__ = 'device_images_functional'
bitstream_id = Column(String(255), nullable=True)
__mapper_args__ = {
'polymorphic_identity': 'functional',
}
class DeviceImageKeyRevocation(DeviceImageCommon, DeviceImage):
__tablename__ = 'device_images_keyrevocation'
revoke_key_id = Column(Integer, nullable=True)
__mapper_args__ = {
'polymorphic_identity': 'key-revocation',
}
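Because bitstream_type is the polymorphic_on column, SQLAlchemy stamps each row's discriminator from polymorphic_identity at flush time, so callers only construct the subclass. A minimal sketch, assuming an open session (illustrative, not from this changeset):

    img = DeviceImageFunctional(uuid='...', bitstream_id='example-id')
    session.add(img)
    session.flush()
    assert img.bitstream_type == 'functional'  # set by the mapper

This is also why deviceimages_get_all wraps its query in with_polymorphic(models.DeviceImage, '*'): a single query eagerly joins all three subtype tables.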
class DeviceLabel(Base):
__tablename__ = 'device_labels'
id = Column(Integer, primary_key=True)
uuid = Column(String(36))
host_id = Column(Integer, ForeignKey('i_host.id', ondelete='CASCADE'))
pcidevice_id = Column(Integer, ForeignKey('pci_devices.id',
ondelete='CASCADE'))
fpgadevice_id = Column(Integer, ForeignKey('fpga_devices.id',
ondelete='CASCADE'))
capabilities = Column(JSONEncodedDict)
host = relationship("ihost", lazy="joined", join_depth=1)
pcidevice = relationship("PciDevice", lazy="joined", join_depth=1)
fpgadevice = relationship("FpgaDevice", lazy="joined", join_depth=1)
label_key = Column(String(384))
label_value = Column(String(128))
UniqueConstraint('pcidevice_id', 'label_key', name='u_pcidevice_id@label_key')
class DeviceImageLabel(Base):
__tablename__ = 'device_image_labels'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), unique=True)
image_id = Column(
Integer, ForeignKey('device_images.id', ondelete='CASCADE'))
label_id = Column(
Integer, ForeignKey('device_labels.id', ondelete='CASCADE'))
status = Column(String(128))
capabilities = Column(JSONEncodedDict)
image = relationship(
"DeviceImage", lazy="joined", backref="device_image_labels")
label = relationship(
"DeviceLabel", lazy="joined", backref="device_image_labels")
UniqueConstraint('image_id', 'label_id', name='u_image_id@label_id')
class DeviceImageState(Base):
__tablename__ = 'device_image_state'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), unique=True)
host_id = Column(Integer, ForeignKey('i_host.id', ondelete='CASCADE'))
pcidevice_id = Column(
Integer, ForeignKey('pci_devices.id', ondelete='CASCADE'))
image_id = Column(
Integer, ForeignKey('device_images.id', ondelete='CASCADE'))
status = Column(String(128))
update_start_time = Column(DateTime(timezone=False))
capabilities = Column(JSONEncodedDict)
host = relationship("ihost", lazy="joined", join_depth=1)
pcidevice = relationship(
"PciDevice", lazy="joined", backref="device_image_state")
image = relationship(
"DeviceImage", lazy="joined", backref="device_image_state")
class SoftwareUpgrade(Base):
    __tablename__ = 'software_upgrade'


@ -16,10 +16,10 @@ class CephPoolsAuditHelm(base.BaseHelm):
     CHART = common.HELM_CHART_CEPH_POOLS_AUDIT
     SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
-        [common.HELM_NS_STORAGE_PROVISIONER]
+        [common.HELM_NS_RBD_PROVISIONER]
     SUPPORTED_APP_NAMESPACES = {
         constants.HELM_APP_PLATFORM:
-            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_STORAGE_PROVISIONER],
+            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_RBD_PROVISIONER],
     }

     SERVICE_NAME = 'ceph-pools'
@ -28,7 +28,7 @@ class CephPoolsAuditHelm(base.BaseHelm):
         # On application load this chart is enabled. Only disable if specified
         # by the user
         if not self._is_enabled(operator.APP, self.CHART,
-                                common.HELM_NS_STORAGE_PROVISIONER):
+                                common.HELM_NS_RBD_PROVISIONER):
             operator.chart_group_chart_delete(
                 operator.CHART_GROUPS_LUT[self.CHART],
                 operator.CHARTS_LUT[self.CHART])
@ -72,7 +72,7 @@ class CephPoolsAuditHelm(base.BaseHelm):
             tiers_cfg.append(tier_cfg)

         overrides = {
-            common.HELM_NS_STORAGE_PROVISIONER: {
+            common.HELM_NS_RBD_PROVISIONER: {
                 'conf': {
                     'ceph': {
                         'monitors': monitors,


@ -1,9 +1,10 @@
 #
-# Copyright (c) 2018-2019 Wind River Systems, Inc.
+# Copyright (c) 2018-2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
+import tsconfig.tsconfig as tsc
 from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common.storage_backend_conf import StorageBackendConfig
@ -21,10 +22,30 @@ class CinderHelm(openstack.OpenstackBaseHelm):
     SERVICE_TYPE = 'volume'
     AUTH_USERS = ['cinder']

+    def _get_mount_overrides(self):
+        overrides = {
+            'volumes': [],
+            'volumeMounts': []
+        }
+        overrides['volumes'].append({
+            'name': 'newvolume',
+            'hostPath': {'path': tsc.IMAGE_CONVERSION_PATH}
+        })
+        overrides['volumeMounts'].append({
+            'name': 'newvolume',
+            'mountPath': tsc.IMAGE_CONVERSION_PATH
+        })
+        return overrides
+
     def get_overrides(self, namespace=None):
         overrides = {
             common.HELM_NS_OPENSTACK: {
                 'pod': {
+                    'mounts': {
+                        'cinder_volume': {
+                            'cinder_volume': self._get_mount_overrides()
+                        }
+                    },
                     'replicas': {
                         'api': self._num_controllers(),
                         'volume': self._num_controllers(),
@ -99,6 +120,17 @@ class CinderHelm(openstack.OpenstackBaseHelm):
                 str(b.name.encode('utf8', 'strict').decode('utf-8')) for b in backends)
             },
         }

+        current_host_fs_list = self.dbapi.host_fs_get_list()
+        chosts = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
+        chosts_fs = [fs for fs in current_host_fs_list
+                     if fs['name'] == constants.FILESYSTEM_NAME_IMAGE_CONVERSION]
+        # Conversion overrides should be generated only if every configured
+        # controller node has the conversion partition added.
+        if len(chosts) == len(chosts_fs):
+            conf_cinder['DEFAULT']['image_conversion_dir'] = \
+                tsc.IMAGE_CONVERSION_PATH
+
         # Always set the default_volume_type to the volume type associated with the
         # primary Ceph backend/tier which is available on all StarlingX platform


@ -80,9 +80,7 @@ HELM_NS_NFS = 'nfs'
 HELM_NS_OPENSTACK = 'openstack'
 HELM_NS_HELM_TOOLKIT = 'helm-toolkit'
 HELM_NS_MONITOR = 'monitor'
-
-# Namespaces: for system functions
-HELM_NS_STORAGE_PROVISIONER = HELM_NS_KUBE_SYSTEM
+HELM_NS_RBD_PROVISIONER = HELM_NS_KUBE_SYSTEM

 # Services
 # Matches configassistant.py value => Should change to STARLINGX


@ -55,8 +55,7 @@ class ElasticsearchDataHelm(elastic.ElasticBaseHelm):
'accessModes': ["ReadWriteOnce"], 'accessModes': ["ReadWriteOnce"],
'resources': { 'resources': {
'requests': {'storage': str(self.DATA_VOLUME_SIZE_GB) + 'Gi'} 'requests': {'storage': str(self.DATA_VOLUME_SIZE_GB) + 'Gi'}
}, }
'storageClass': 'general'
}, },
'nodeSelector': {common.LABEL_MONITOR_DATA: "enabled"}, 'nodeSelector': {common.LABEL_MONITOR_DATA: "enabled"},
'antiAffinity': "hard", 'antiAffinity': "hard",


@ -52,8 +52,7 @@ class ElasticsearchMasterHelm(elastic.ElasticBaseHelm):
'accessModes': ["ReadWriteOnce"], 'accessModes': ["ReadWriteOnce"],
'resources': { 'resources': {
'requests': {'storage': '4Gi'} 'requests': {'storage': '4Gi'}
}, }
'storageClass': 'general'
}, },
} }
} }


@ -18,7 +18,9 @@ class FilebeatHelm(elastic.ElasticBaseHelm):
         system_fields = self.get_system_info_overrides()
         overrides = {
             common.HELM_NS_MONITOR: {
-                'config': self._get_config_overrides(system_fields),
+                'filebeatConfig': {
+                    'filebeat.yml': self._get_config_overrides(system_fields),
+                },
                 'resources': self._get_resources_overrides(),
             }
         }
@ -34,14 +36,23 @@ class FilebeatHelm(elastic.ElasticBaseHelm):
     def _get_config_overrides(self, system_fields):
         conf = {
             'name': '${NODE_NAME}',
-            'processors': [{'add_kubernetes_metadata': {'in_cluster': True}}],
+            'processors': [
+                {
+                    'add_kubernetes_metadata': {
+                        'labels.dedot': True,
+                        'annotations.dedot': True
+                        # If kube_config is not set, KUBECONFIG environment variable will be checked
+                        # and if not present it will fall back to InCluster
+                    }
+                }
+            ],
+            'fields_under_root': True,
+            'fields': {
+                "system": system_fields
+            },
             'filebeat.inputs': [
                 {
                     'enabled': True,
-                    'fields_under_root': True,
-                    'fields': {
-                        "system": system_fields
-                    },
                     'paths': [
                         "/var/log/*.log",
                         "/var/log/messages",
@ -49,6 +60,10 @@ class FilebeatHelm(elastic.ElasticBaseHelm):
                         "/var/log/**/*.log"
                     ],
                     'type': "log",
+                    'exclude_files': [
+                        "^/var/log/containers/",
+                        "^/var/log/pods/"
+                    ],
                     'close_timeout': "5m"
                 }
             ]
@ -72,9 +87,9 @@ class FilebeatHelm(elastic.ElasticBaseHelm):
     @staticmethod
     def _get_resources_overrides():
-        cpu_request = "40m"
-        cpu_limit = "80m"
-        memory_size = "256Mi"
+        cpu_request = "50m"
+        cpu_limit = "180m"
+        memory_size = "512Mi"
         return {'requests': {
                     'cpu': cpu_request},


@ -20,6 +20,7 @@ from stevedore import extension
 from oslo_log import log as logging

 from sysinv.common import exception
+from sysinv.common import kubernetes
 from sysinv.common import utils
 from sysinv.helm import common
@ -451,7 +452,7 @@ class HelmOperator(object):
                 cmd.extend(['--set', value_set])

             env = os.environ.copy()
-            env['KUBECONFIG'] = '/etc/kubernetes/admin.conf'
+            env['KUBECONFIG'] = kubernetes.KUBERNETES_ADMIN_CONF

             # Make a temporary directory with a fake chart in it
             try:


@ -26,19 +26,18 @@ class LogstashHelm(elastic.ElasticBaseHelm):
         overrides = {
             common.HELM_NS_MONITOR: {
-                'replicaCount': replicas,
+                'replicas': replicas,
                 'resources': self._get_resources_overrides(),
-                'config': self._get_config(),
             }
         }

         if self._is_distributed_cloud_role_subcloud():
             subcloud_settings = {
-                'elasticsearch': {
-                    'host': "http://%s" %
-                            self._system_controller_floating_address(),
-                    'port': self.NODE_PORT
-                },
+                'elasticsearchHosts': "http://%s:%s%s" % (
+                    self._system_controller_floating_address(),
+                    self.NODE_PORT,
+                    self.ELASTICSEARCH_CLIENT_PATH
+                ),
                 'ingress': {'enabled': False},
             }
             overrides[common.HELM_NS_MONITOR].update(subcloud_settings)
@ -51,17 +50,7 @@ class LogstashHelm(elastic.ElasticBaseHelm):
         else:
             return overrides

-    def _get_config(self):
-        if self._is_distributed_cloud_role_subcloud():
-            # this does not accept self.ELASTICSEARCH_CLIENT_PATH
-            config = {'elasticsearch.path': "/mon-elasticsearch-client"}
-        else:
-            config = {'elasticsearch.path': ""}
-        return config
-
     def _get_resources_overrides(self):
         if (utils.is_aio_system(self.dbapi) and not
                 self._is_distributed_cloud_role_system_controller()):
             cpu_limits = "500m"
@ -70,7 +59,9 @@ class LogstashHelm(elastic.ElasticBaseHelm):
             cpu_limits = "500m"
             memory_limits = "2048Mi"

-        return {'limits': {
+        return {'requests': {
+                    'memory': memory_limits},
+                'limits': {
                     'cpu': cpu_limits,
                     'memory': memory_limits},
                 }


@ -112,9 +112,6 @@ class MonitorArmadaManifestOperator(base.ArmadaManifestOperator):
                 constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD):
             # remove the chart_groups not needed in this configuration
-            self.chart_group_remove(dbapi,
-                                    common.HELM_NS_MONITOR,
-                                    self.CHART_GROUP_NGINX)
             self.chart_group_remove(dbapi,
                                     common.HELM_NS_MONITOR,
                                     self.CHART_GROUP_KIBANA)


@ -27,6 +27,11 @@ class MariadbHelm(openstack.OpenstackBaseHelm):
                    }
                },
                'endpoints': self._get_endpoints_overrides(),
                'conf': {
                    'database': {
                        'config_override': self._get_database_config_override()
                    }
                }
            }
        }
@ -38,6 +43,18 @@ class MariadbHelm(openstack.OpenstackBaseHelm):
        else:
            return overrides

    def _get_database_config_override(self):
        listen_host = "0.0.0.0"
        if self._is_ipv6_cluster_service():
            listen_host = "::"
        return "[mysqld]\n" \
               "bind_address=::\n" \
               "wsrep_provider_options=\"evs.suspect_timeout=PT30S; " \
               "gmcast.peer_timeout=PT15S; " \
               "gmcast.listen_addr=tcp://%s:{{ tuple \"oslo_db\" " \
               "\"direct\" \"wsrep\" . | " \
               "include \"helm-toolkit.endpoints.endpoint_port_lookup\" }}\"" % listen_host
    def _get_endpoints_overrides(self):
        return {
            'oslo_db': {


@ -20,20 +20,16 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
             common.HELM_NS_MONITOR: {
                 'systemName': '',
                 'resources': self._get_resources_overrides(),
-                'daemonset': {
-                    'modules': {
-                        'system': self._get_metric_system(),
-                        'kubernetes': self._get_metric_kubernetes(),
-                    },
-                    'config': self._get_config_overrides(system_fields),
-                },
-                'deployment': {
-                    'modules': {
-                        'kubernetes':
-                            self._get_metric_deployment_kubernetes()
-                    },
-                    'config': self._get_config_overrides(system_fields),
-                }
+                'metricbeatConfig': {
+                    'metricbeat.yml': self._get_config_overrides(
+                        system_fields,
+                        self._get_daemonset_module_config()
+                    ),
+                    'kube-state-metrics-metricbeat.yml': self._get_config_overrides(
+                        system_fields,
+                        self._get_deployment_module_config()
+                    ),
+                },
             }
         }
@ -45,13 +41,14 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
         else:
             return overrides

-    def _get_config_overrides(self, system_fields):
+    def _get_config_overrides(self, system_fields, modules):
         conf = {
             'name': '${NODE_NAME}',
             'fields_under_root': True,
             'fields': {
                 "system": system_fields
-            }
+            },
+            'metricbeat.modules': modules,
         }

         if self._is_distributed_cloud_role_subcloud():
@ -70,32 +67,36 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
         return conf

-    def _get_metric_system(self):
-        conf = {
-            "enabled": True,
-            "config": self._get_metric_module_config()
-        }
-        return conf
+    def _get_daemonset_module_config(self):
+        modules = [
+            self._get_metric_kubernetes(),
+        ] + self._get_metric_system()
+        return modules

-    def _get_metric_module_config(self):
+    def _get_deployment_module_config(self):
+        modules = [
+            self._get_metric_deployment_kubernetes(),
+        ]
+        return modules
+
+    def _get_metric_system(self):
         conf = [
             {
                 "module": "system",
+                "enabled": True,
                 "period": "60s",
                 "metricsets": [
                     "cpu",
                     "diskio",
-                    "load",
                     "memory",
-                    "process_summary",
                 ],
                 "cpu.metrics": [
-                    "percentages",
                     "normalized_percentages"
                 ]
             },
             {
                 "module": "system",
+                "enabled": True,
                 "period": "60s",
                 "metricsets": [
                     "process"
@ -117,6 +118,7 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
             },
             {
                 "module": "system",
+                "enabled": True,
                 "period": "60s",
                 "metricsets": [
                     "network"
@ -144,10 +146,10 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
             },
             {
                 "module": "system",
+                "enabled": True,
                 "period": "5m",
                 "metricsets": [
                     "filesystem",
-                    "fsstat",
                 ],
                 "processors": [
                     {"drop_event.when": {
@ -163,64 +165,60 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):

     def _get_metric_kubernetes(self):
         conf = {
+            "module": "kubernetes",
             "enabled": True,
-            "config": [
-                {
-                    "module": "kubernetes",
-                    "in_cluster": True,
-                    "add_metadata": True,
-                    "metricsets": [
-                        "node",
-                        "system",
-                        "pod",
-                        "container"
-                    ],
-                    "period": "10s",
-                    "host": "${NODE_NAME}",
-                    "hosts": [
-                        "https://${HOSTNAME}:10250"
-                    ],
-                    "bearer_token_file":
-                        "/var/run/secrets/kubernetes.io/serviceaccount/token",
-                    "ssl.verification_mode": "none",
-                    "ssl.certificate_authorities": [
-                        "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
-                    ]
-                }
-            ]
+            # If kube_config is not set, KUBECONFIG environment variable will be checked
+            # and if not present it will fall back to InCluster
+            "add_metadata": True,
+            "labels.dedot": True,
+            "annotations.dedot": True,
+            "metricsets": [
+                "node",
+                "pod",
+                "container"
+            ],
+            "period": "60s",
+            "host": "${NODE_NAME}",
+            "hosts": [
+                "https://${HOSTNAME}:10250"
+            ],
+            "bearer_token_file":
+                "/var/run/secrets/kubernetes.io/serviceaccount/token",
+            "ssl.verification_mode": "none",
+            "ssl.certificate_authorities": [
+                "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+            ]
         }
         return conf

     def _get_metric_deployment_kubernetes(self):
         conf = {
+            "module": "kubernetes",
             "enabled": True,
-            "config": [
-                {
-                    "module": "kubernetes",
-                    "in_cluster": True,
-                    "add_metadata": True,
-                    "metricsets": [
-                        "state_node",
-                        "state_deployment",
-                        "state_replicaset",
-                        "state_pod",
-                        "state_container",
-                        "event",
-                        "state_statefulset"
-                    ],
-                    "period": "60s",
-                    "host": "${NODE_NAME}",
-                    "hosts": [
-                        "${KUBE_STATE_METRICS_HOST}:8080"
-                    ]
-                }
-            ]
+            # If kube_config is not set, KUBECONFIG environment variable will be checked
+            # and if not present it will fall back to InCluster
+            "add_metadata": True,
+            "labels.dedot": True,
+            "annotations.dedot": True,
+            "metricsets": [
+                "state_node",
+                "state_deployment",
+                "state_replicaset",
+                "state_pod",
+                "state_container",
+                "event",
+                "state_statefulset"
+            ],
+            "period": "60s",
+            "host": "${NODE_NAME}",
+            "hosts": [
+                "${KUBE_STATE_METRICS_HOSTS}"
+            ]
         }
         return conf

     @staticmethod
     def _get_resources_overrides():
         cpu_request = "50m"
         cpu_limit = "180m"  # overload at 150m
         memory_limit = "512Mi"


@ -1,20 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.helm import base
MONITOR_SUPPORTED_VERSIONS = [
'1.0-1',
]
class StxMonitorVersionCheckHelm(base.BaseHelm):
"""Class to provide application version check"""
def _get_supported_versions(self):
return MONITOR_SUPPORTED_VERSIONS
def version_check(self, app_version):
return app_version in self._get_supported_versions()


@ -1,23 +0,0 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.helm import base
SUPPORTED_VERSIONS = {
'1.0-19-centos-stable-versioned',
'1.0-19-centos-stable-latest',
'1.0-19',
}
class StxOpenstackVersionCheckHelm(base.BaseHelm):
"""Class to provide application version check"""
def _get_supported_versions(self):
return SUPPORTED_VERSIONS
def version_check(self, app_version):
return app_version in self._get_supported_versions()


@ -17,10 +17,10 @@ class RbdProvisionerHelm(base.BaseHelm):
     CHART = common.HELM_CHART_RBD_PROVISIONER
     SUPPORTED_NAMESPACES = base.BaseHelm.SUPPORTED_NAMESPACES + \
-        [common.HELM_NS_STORAGE_PROVISIONER]
+        [common.HELM_NS_RBD_PROVISIONER]
     SUPPORTED_APP_NAMESPACES = {
         constants.HELM_APP_PLATFORM:
-            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_STORAGE_PROVISIONER],
+            base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_RBD_PROVISIONER],
     }

     SERVICE_NAME = common.HELM_CHART_RBD_PROVISIONER
@ -30,7 +30,7 @@ class RbdProvisionerHelm(base.BaseHelm):
         # On application load this chart is enabled. Only disable if specified
         # by the user
         if not self._is_enabled(operator.APP, self.CHART,
-                                common.HELM_NS_STORAGE_PROVISIONER):
+                                common.HELM_NS_RBD_PROVISIONER):
             operator.chart_group_chart_delete(
                 operator.CHART_GROUPS_LUT[self.CHART],
                 operator.CHARTS_LUT[self.CHART])
@ -86,7 +86,7 @@ class RbdProvisionerHelm(base.BaseHelm):
         }

         overrides = {
-            common.HELM_NS_STORAGE_PROVISIONER: {
+            common.HELM_NS_RBD_PROVISIONER: {
                 "classdefaults": classdefaults,
                 "classes": classes,
                 "global": global_settings


@ -13,6 +13,7 @@ from eventlet.green import subprocess
 import ruamel.yaml as yaml
 from oslo_log import log as logging
 from sysinv.agent import rpcapi as agent_rpcapi
+from sysinv.common import kubernetes
 from sysinv.common import exception
 from sysinv.openstack.common import context
 import threading
@ -43,7 +44,7 @@ def retrieve_helm_releases():
     :return: a dict of deployed helm releases
     """
     helm_list = subprocess.Popen(
-        ['helm', '--kubeconfig', '/etc/kubernetes/admin.conf',
+        ['helm', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF,
          'list', '--output', 'yaml'],
         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     timer = threading.Timer(20, helm_list.kill)
@ -93,7 +94,7 @@ def delete_helm_release(release):
     :param release: the name of the helm release
     """
     helm_cmd = subprocess.Popen(
-        ['helm', '--kubeconfig', '/etc/kubernetes/admin.conf',
+        ['helm', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF,
          'delete', release],
         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     timer = threading.Timer(20, helm_cmd.kill)
@ -123,7 +124,7 @@ def delete_helm_release(release):
 def get_openstack_pending_install_charts():
     try:
         return subprocess.check_output(
-            ['helm', '--kubeconfig', '/etc/kubernetes/admin.conf',
+            ['helm', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF,
              'list', '--namespace', 'openstack', '--pending'])
     except Exception as e:
         raise exception.HelmTillerFailure(
@ -138,18 +139,21 @@ def helm_upgrade_tiller(image):
     # sed command until helm and tiller provide a fix for
     # https://github.com/helm/helm/issues/6374
     workaround_part1 = '--skip-refresh ' \
                        '--service-account tiller ' \
                        '--node-selectors "node-role.kubernetes.io/master"="" ' \
                        '--override spec.template.spec.hostNetwork=true ' \
                        '--override spec.selector.matchLabels.app=helm ' \
                        '--override spec.selector.matchLabels.name=tiller ' \
                        '--output yaml'
     workaround_part2 = \
         '| sed "s@apiVersion: extensions/v1beta1@apiVersion: apps/v1@" ' \
-        '| kubectl --kubeconfig /etc/kubernetes/admin.conf replace --force -f -'
+        '| kubectl --kubeconfig {} replace --force -f -'.format(
+            kubernetes.KUBERNETES_ADMIN_CONF)

-    cmd = '{} {} {} {}'.format(
-        'helm init --upgrade --kubeconfig /etc/kubernetes/admin.conf --tiller-image',
+    cmd = '{} {} {} {} {} {}'.format(
+        'helm init --upgrade --kubeconfig',
+        kubernetes.KUBERNETES_ADMIN_CONF,
+        '--tiller-image',
         image,
         workaround_part1,
         workaround_part2)


@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2019 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
 #
@ -28,7 +28,12 @@ from sysinv.objects import community
 from sysinv.objects import controller_fs
 from sysinv.objects import cpu
 from sysinv.objects import datanetwork
+from sysinv.objects import device_image
+from sysinv.objects import device_image_label
+from sysinv.objects import device_image_state
+from sysinv.objects import device_label
 from sysinv.objects import disk
+from sysinv.objects import fpga_device
 from sysinv.objects import partition
 from sysinv.objects import dns
 from sysinv.objects import drbdconfig
@ -195,6 +200,11 @@ kube_upgrade = kube_upgrade.KubeUpgrade
 kube_version = kube_version.KubeVersion
 datanetwork = datanetwork.DataNetwork
 host_fs = host_fs.HostFS
+device_image = device_image.DeviceImage
+device_image_label = device_image_label.DeviceImageLabel
+device_image_state = device_image_state.DeviceImageState
+device_label = device_label.DeviceLabel
+fpga_device = fpga_device.FPGADevice

 __all__ = (system,
            cluster,
@ -268,6 +278,10 @@ __all__ = (system,
            datanetwork,
            interface_network,
            host_fs,
+           device_image,
+           device_image_label,
+           device_label,
+           fpga_device,
            # alias objects for RPC compatibility
            ihost,
            ilvg,


@ -0,0 +1,45 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class DeviceImage(base.SysinvObject):
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {'id': int,
'uuid': utils.uuid_or_none,
'bitstream_type': utils.str_or_none,
'pci_vendor': utils.str_or_none,
'pci_device': utils.str_or_none,
'bitstream_id': utils.str_or_none,
'key_signature': utils.str_or_none,
'revoke_key_id': utils.int_or_none,
'name': utils.str_or_none,
'description': utils.str_or_none,
'image_version': utils.str_or_none,
'applied': utils.bool_or_none,
'capabilities': utils.dict_or_none,
}
_optional_fields = {'bitstream_id',
'key_signature',
'revoke_key_id',
'name',
'description',
'image_version'}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.deviceimage_get(uuid)
def save_changes(self, context, updates):
        self.dbapi.deviceimage_update(self.uuid,  # pylint: disable=no-member
                                      updates)
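A sketch of the object-layer round trip these hooks support, assuming the SysinvObject base class provides a save() that feeds collected changes into save_changes() (context and uuid assumed available):

    image = objects.device_image.get_by_uuid(context, uuid)
    image.applied = True
    image.save(context)  # drives save_changes() with the dirty fields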


@ -0,0 +1,40 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class DeviceImageLabel(base.SysinvObject):
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {'id': int,
'uuid': utils.uuid_or_none,
'image_id': utils.int_or_none,
'image_uuid': utils.uuid_or_none,
'label_id': utils.int_or_none,
'label_uuid': utils.uuid_or_none,
'status': utils.str_or_none,
'capabilities': utils.dict_or_none,
}
_foreign_fields = {
'image_id': 'image:id',
'label_id': 'label:id',
'image_uuid': 'image:uuid',
'label_uuid': 'label:uuid',
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.device_image_label_get(uuid)
def save_changes(self, context, updates):
self.dbapi.device_image_label_update(self.uuid, # pylint: disable=no-member
updates)


@ -0,0 +1,42 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class DeviceImageState(base.SysinvObject):
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {'id': int,
'uuid': utils.uuid_or_none,
'host_id': utils.int_or_none,
'host_uuid': utils.uuid_or_none,
'pcidevice_id': utils.int_or_none,
'pcidevice_uuid': utils.uuid_or_none,
'image_id': utils.int_or_none,
'image_uuid': utils.uuid_or_none,
'status': utils.str_or_none,
'update_start_time': utils.datetime_or_str_or_none,
'capabilities': utils.dict_or_none,
}
_foreign_fields = {
'host_uuid': 'host:uuid',
'pcidevice_uuid': 'pcidevice:uuid',
'image_uuid': 'image:uuid',
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.device_image_state_get(uuid)
def save_changes(self, context, updates):
self.dbapi.device_image_state_update(self.uuid, # pylint: disable=no-member
updates)


@ -0,0 +1,43 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class DeviceLabel(base.SysinvObject):
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'host_id': utils.str_or_none,
'host_uuid': utils.str_or_none,
'label_key': utils.str_or_none,
'label_value': utils.str_or_none,
'pcidevice_id': utils.int_or_none,
'pcidevice_uuid': utils.str_or_none,
'fpgadevice_id': utils.int_or_none,
'fpgadevice_uuid': utils.str_or_none,
'capabilities': utils.dict_or_none,
}
_foreign_fields = {
'host_uuid': 'host:uuid',
'pcidevice_uuid': 'pcidevice:uuid',
'fpgadevice_uuid': 'fpgadevice:uuid',
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.device_label_get(uuid)
def save_changes(self, context, updates):
self.dbapi.device_label_update(self.uuid, # pylint: disable=no-member
updates)


@ -0,0 +1,41 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
class FPGADevice(base.SysinvObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'host_id': utils.int_or_none,
'host_uuid': utils.str_or_none,
'pci_id': utils.int_or_none,
'pciaddr': utils.str_or_none,
'bmc_build_version': utils.str_or_none,
'bmc_fw_version': utils.str_or_none,
'root_key': utils.str_or_none,
'revoked_key_ids': utils.str_or_none,
'boot_page': utils.str_or_none,
'bitstream_id': utils.str_or_none,
}
_foreign_fields = {
'host_uuid': 'host:uuid'
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.fpga_device_get(uuid)
def save_changes(self, context, updates):
self.dbapi.fpga_device_update(self.uuid, # pylint: disable=no-member
updates)

Some files were not shown because too many files have changed in this diff.