
Merge remote-tracking branch 'gerrit/master' into f/centos8

Change-Id: I85724a269314c46969c064ec52ad05ac7fffebd4
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
changes/12/729812/1
commit 539d476456
100 changed files with 5089 additions and 846 deletions
  1. +575 -2  api-ref/source/api-ref-sysinv-v1-config.rst
  2. +1 -1  config-gate/centos/build_srpm.data
  3. +1 -1  config-gate/files/config.service
  4. +1 -1  controllerconfig/centos/build_srpm.data
  5. +3 -2  controllerconfig/centos/controllerconfig.spec
  6. +0 -2  controllerconfig/controllerconfig/controllerconfig/common/constants.py
  7. +125 -62  controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py
  8. +30 -38  controllerconfig/controllerconfig/controllerconfig/upgrades/management.py
  9. +24 -0  controllerconfig/controllerconfig/scripts/controller_config
  10. +95 -0  controllerconfig/controllerconfig/scripts/upgrade_swact_migration.py
  11. +0 -133  controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py
  12. +0 -104  controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py
  13. +3 -2  controllerconfig/opensuse/controllerconfig.spec
  14. +1 -1  storageconfig/centos/build_srpm.data
  15. +1 -1  sysinv/cgts-client/centos/build_srpm.data
  16. +1 -0  sysinv/cgts-client/centos/cgts-client.spec
  17. +1 -1  sysinv/cgts-client/cgts-client/cgtsclient/client.py
  18. +5 -0  sysinv/cgts-client/cgts-client/cgtsclient/common/base.py
  19. +15 -2  sysinv/cgts-client/cgts-client/cgtsclient/common/http.py
  20. +123 -0  sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_controllerfs.py
  21. +131 -0  sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_controllerfs_shell.py
  22. +7 -1  sysinv/cgts-client/cgts-client/cgtsclient/v1/client.py
  23. +28 -8  sysinv/cgts-client/cgts-client/cgtsclient/v1/controller_fs_shell.py
  24. +81 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/device_image.py
  25. +157 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/device_image_shell.py
  26. +23 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/device_image_state.py
  27. +24 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/device_image_state_shell.py
  28. +40 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/device_label.py
  29. +120 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/device_label_shell.py
  30. +17 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/host_fs.py
  31. +53 -0  sysinv/cgts-client/cgts-client/cgtsclient/v1/host_fs_shell.py
  32. +31 -2  sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py
  33. +11 -1  sysinv/cgts-client/cgts-client/cgtsclient/v1/ihost.py
  34. +12 -4  sysinv/cgts-client/cgts-client/cgtsclient/v1/pci_device.py
  35. +13 -6  sysinv/cgts-client/cgts-client/cgtsclient/v1/pci_device_shell.py
  36. +7 -1  sysinv/cgts-client/cgts-client/cgtsclient/v1/shell.py
  37. +1 -0  sysinv/cgts-client/cgts-client/requirements.txt
  38. +3 -2  sysinv/cgts-client/cgts-client/test-requirements.txt
  39. +1 -1  sysinv/sysinv-agent/centos/build_srpm.data
  40. +2 -2  sysinv/sysinv-agent/centos/sysinv-agent.spec
  41. +1 -1  sysinv/sysinv-agent/sysinv-agent.service
  42. +1 -1  sysinv/sysinv/centos/build_srpm.data
  43. +2 -4  sysinv/sysinv/sysinv/setup.cfg
  44. +1 -1  sysinv/sysinv/sysinv/sysinv/_i18n.py
  45. +36 -1  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/__init__.py
  46. +5 -26  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/cpu_utils.py
  47. +478 -0  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/device_image.py
  48. +152 -0  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/device_image_state.py
  49. +244 -0  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/device_label.py
  50. +72 -1  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py
  51. +154 -4  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host_fs.py
  52. +1 -1  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/interface_network.py
  53. +12 -2  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_app.py
  54. +40 -1  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/pci_device.py
  55. +2 -1  sysinv/sysinv/sysinv/sysinv/api/controllers/v1/route.py
  56. +2 -0  sysinv/sysinv/sysinv/sysinv/api/hooks.py
  57. +31 -0  sysinv/sysinv/sysinv/sysinv/cmd/utils.py
  58. +1 -1  sysinv/sysinv/sysinv/sysinv/common/ceph.py
  59. +24 -6  sysinv/sysinv/sysinv/sysinv/common/constants.py
  60. +26 -0  sysinv/sysinv/sysinv/sysinv/common/device.py
  61. +91 -1  sysinv/sysinv/sysinv/sysinv/common/exception.py
  62. +10 -45  sysinv/sysinv/sysinv/sysinv/common/health.py
  63. +31 -2  sysinv/sysinv/sysinv/sysinv/common/kubernetes.py
  64. +17 -0  sysinv/sysinv/sysinv/sysinv/common/service_parameter.py
  65. +0 -18  sysinv/sysinv/sysinv/sysinv/common/storage_backend_conf.py
  66. +8 -0  sysinv/sysinv/sysinv/sysinv/common/utils.py
  67. +1 -1  sysinv/sysinv/sysinv/sysinv/conductor/ceph.py
  68. +64 -26  sysinv/sysinv/sysinv/sysinv/conductor/kube_app.py
  69. +326 -146  sysinv/sysinv/sysinv/sysinv/conductor/manager.py
  70. +49 -1  sysinv/sysinv/sysinv/sysinv/conductor/rpcapi.py
  71. +523 -1  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/api.py
  72. +14 -0  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/099_placeholder.py
  73. +14 -0  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/100_placeholder.py
  74. +14 -0  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/101_placeholder.py
  75. +14 -0  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/102_placeholder.py
  76. +14 -0  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/103_placeholder.py
  77. +78 -0  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/104_fpga_devices.py
  78. +201 -0  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/migrate_repo/versions/105_device_images.py
  79. +163 -1  sysinv/sysinv/sysinv/sysinv/db/sqlalchemy/models.py
  80. +4 -4  sysinv/sysinv/sysinv/sysinv/helm/ceph_pools_audit.py
  81. +33 -1  sysinv/sysinv/sysinv/sysinv/helm/cinder.py
  82. +1 -3  sysinv/sysinv/sysinv/sysinv/helm/common.py
  83. +1 -2  sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_data.py
  84. +1 -2  sysinv/sysinv/sysinv/sysinv/helm/elasticsearch_master.py
  85. +24 -9  sysinv/sysinv/sysinv/sysinv/helm/filebeat.py
  86. +2 -1  sysinv/sysinv/sysinv/sysinv/helm/helm.py
  87. +8 -17  sysinv/sysinv/sysinv/sysinv/helm/logstash.py
  88. +0 -3  sysinv/sysinv/sysinv/sysinv/helm/manifest_monitor.py
  89. +17 -0  sysinv/sysinv/sysinv/sysinv/helm/mariadb.py
  90. +68 -70  sysinv/sysinv/sysinv/sysinv/helm/metricbeat.py
  91. +0 -20  sysinv/sysinv/sysinv/sysinv/helm/monitor_version_check.py
  92. +0 -23  sysinv/sysinv/sysinv/sysinv/helm/openstack_version_check.py
  93. +4 -4  sysinv/sysinv/sysinv/sysinv/helm/rbd_provisioner.py
  94. +16 -12  sysinv/sysinv/sysinv/sysinv/helm/utils.py
  95. +15 -1  sysinv/sysinv/sysinv/sysinv/objects/__init__.py
  96. +45 -0  sysinv/sysinv/sysinv/sysinv/objects/device_image.py
  97. +40 -0  sysinv/sysinv/sysinv/sysinv/objects/device_image_label.py
  98. +42 -0  sysinv/sysinv/sysinv/sysinv/objects/device_image_state.py
  99. +43 -0  sysinv/sysinv/sysinv/sysinv/objects/device_label.py
  100. +41 -0  sysinv/sysinv/sysinv/sysinv/objects/fpga_device.py

+575 -2  api-ref/source/api-ref-sysinv-v1-config.rst

@@ -2036,7 +2036,7 @@ itemNotFound (404)
::
{
"istors":[
{
"function":"osd",
@@ -5721,6 +5721,14 @@ itemNotFound (404)
"links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
"created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
"updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
"needs_firmware_update (optional) ", "plain", "xsd:string", "Indicates whether the device requires firmware update."
"status (optional) ", "plain", "xsd:string", "The status of firmware update of the device."
"root_key (optional) ", "plain", "xsd:string", "The root key of the FPGA device."
"revoked_key_ids (optional) ", "plain", "xsd:string", "The revoked key ids of the FPGA device."
"boot_page (optional) ", "plain", "xsd:string", "The boot page of the FPGA device."
"bitstream_id (optional) ", "plain", "xsd:string", "The bitstream id of the FPGA device."
"bmc_build_version (optional) ", "plain", "xsd:string", "The BMC build version of the FPGA device."
"bmc_fw_version (optional) ", "plain", "xsd:string", "The BMC firmware version of the FPGA device."
::
@@ -6109,7 +6117,47 @@ itemNotFound (404)
"psvendor": "",
"enabled": "False",
"name": "pci_0000_00_0b_0"
}
},
{
"links": [
{
"href": "http://192.168.204.1:6385/v1/pci_devices/3ab614a6-3906-4c55-8114-4d78a6dde445",
"rel": "self"
},
{
"href": "http://192.168.204.1:6385/pci_devices/3ab614a6-3906-4c55-8114-4d78a6dde445",
"rel": "bookmark"
}
],
"enabled": true,
"updated_at": "2020-05-04T18:54:03.679744+00:00",
"needs_firmware_update": false,
"bitstream_id": null,
"uuid": "3ab614a6-3906-4c55-8114-4d78a6dde445",
"pdevice": "Device 0b30",
"boot_page": null,
"psvendor": "Intel Corporation",
"psdevice": "Device 0000",
"pclass_id": "120000",
"pvendor": "Intel Corporation",
"status": null,
"sriov_numvfs": 0,
"driver": "intel-fpga-pci",
"bmc_fw_version": null,
"root_key": null,
"host_uuid": "35436a7d-ce05-4e5f-87ac-706fe7513ece",
"bmc_build_version": null,
"name": "pci_0000_b3_00_0",
"revoked_key_ids": null,
"numa_node": 1,
"created_at": "2020-05-04T18:23:34.697710+00:00",
"pdevice_id": "0b30",
"pclass": "Processing accelerators",
"sriov_vfs_pci_address": "",
"sriov_totalvfs": 1,
"pciaddr": "0000:b3:00.0",
"pvendor_id": "8086"
},
]
}
@@ -6310,6 +6358,531 @@ badMediaType (415)
"pvendor_id": "8086"
}
--------------
Device images
--------------
************************
List the device images
************************
.. rest_method:: GET /v1/device_images
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_images (Optional)", "plain", "xsd:list", "The list of device images."
"bitstream_type (Optional)", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor (Optional)", "plain", "xsd:string", "The vendor ID of the pci device."
"pci_device (Optional)", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key signature of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
"links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels":
{
"key1": "value1",
"key2": "value2"
},
},
{
"uuid": "09100124-5ae9-44d8-aefc-a192b8f27360",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "root-key",
"bitstream_id": null
"key_signature": "a123",
"revoke_key_id": null,
"name": "Image name",
"description": null,
"image_version": null,
"applied_labels": null,
},
{
"uuid": "ef4c39b1-81e9-42dd-b850-06fc8833b47c",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "key-revocation",
"bitstream_id": null
"key_signature": null,
"revoke_key_id": 123,
"name": "Image name",
"description": null,
"image_version": null,
"applied_labels": null,
},
]
}
This operation does not accept a request body.
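A minimal sketch of driving this endpoint with python-requests; the host address and token below are illustrative placeholders, not values defined by this change::

    import requests

    # List all device images and print a short summary of each.
    resp = requests.get('http://192.168.204.1:6385/v1/device_images',
                        headers={'X-Auth-Token': 'ADMIN_TOKEN'})
    resp.raise_for_status()
    for image in resp.json()['device_images']:
        print(image['uuid'], image['bitstream_type'], image['pci_device'])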
**************************************************
Shows attributes of the Device Image object
**************************************************
.. rest_method:: GET /v1/device_images/{image_id}
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_images (Optional)", "plain", "xsd:list", "The list of device images."
"bitstream_type (Optional)", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor (Optional)", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device (Optional)", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
"links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels":
{
"key1": "value1",
"key2": "value2"
},
}
]
}
************************
Creates a device image
************************
.. rest_method:: POST /v1/device_image
**Normal response codes**
200
**Error response codes**
badMediaType (415)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image. Valid types are ``functional``, ``root-key``, ``key-revocation``"
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image. Required for bitstream type ``functional`` "
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels": null
}
]
}
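Because a functional image carries a binary bitstream file, the cgts-client uploads it as multipart form data (see the ``upload_request_with_multipart`` addition to ``cgtsclient/common/http.py`` below). A hedged sketch of an equivalent standalone request, with placeholder host, token, and file path::

    import requests
    from requests_toolbelt import MultipartEncoder

    # Stream the bitstream file rather than reading it into memory.
    with open('/tmp/image.bit', 'rb') as f:
        enc = MultipartEncoder({'bitstream_type': 'functional',
                                'pci_vendor': '8086',
                                'pci_device': '0b30',
                                'bitstream_id': '1234',
                                'file': ('image.bit', f)})
        resp = requests.post('http://192.168.204.1:6385/v1/device_image',
                             data=enc,
                             headers={'Content-Type': enc.content_type,
                                      'X-Auth-Token': 'ADMIN_TOKEN'})
    print(resp.json())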
************************************************
Applies the device image to all hosts or label
************************************************
.. rest_method:: PATCH /v1/device_images/{image_id}?action=apply
**Normal response codes**
200
**Error response codes**
badMediaType (415)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
"device_label (Optional)", "plain", "xsd:string", "The key-value paired device label assigned to a device."
::
{
"key1": "value1"
}
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels":
{
"key1": "value1"
},
}
]
}
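A sketch of applying an image to every device carrying a given label; the host, token, and image UUID are placeholders, and sending the label as a JSON body is an assumption based on the request example above::

    import requests

    # Apply the image to all devices labelled key1=value1.
    url = ('http://192.168.204.1:6385/v1/device_images/'
           '7e794693-2060-4e9e-b0bd-b281b059e8e4?action=apply')
    resp = requests.patch(url,
                          json={'key1': 'value1'},
                          headers={'X-Auth-Token': 'ADMIN_TOKEN'})
    print(resp.json())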
*******************************************
Remove the device image from host or label
*******************************************
.. rest_method:: PATCH /v1/device_images/{image_id}?action=remove
**Normal response codes**
200
**Error response codes**
badMediaType (415)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
"device_label (Optional)", "plain", "xsd:string", "The key-value paired device label assigned to a device."
::
{
"key1": "value1"
}
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"bitstream_type ", "plain", "xsd:string", "The bitstream type of the device image."
"pci_vendor ", "plain", "xsd:string", "The vendor ID of the pci device ."
"pci_device ", "plain", "xsd:string", "The device ID of the pci device."
"bitstream_id (Optional)", "plain", "xsd:string", "The bitstream id of the functional device image."
"key_signature (Optional)", "plain", "xsd:string", "The key id of the root-key device image."
"revoked_key_id (Optional)", "plain", "xsd:string", "The key revocation id of the key revocation device image."
"name (Optional)", "plain", "xsd:string", "The name of the device image."
"description (Optional)", "plain", "xsd:string", "The description of the device image."
"image_version (Optional)", "plain", "xsd:string", "The version of the device image."
"applied_labels (Optional)", "plain", "xsd:list", "The device image applied to the device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
::
{
"device_images": [
{
"uuid": "7e794693-2060-4e9e-b0bd-b281b059e8e4",
"pci_vendor": "8086",
"pci_device": "0b30",
"bitstream_type": "functional",
"bitstream_id": "1234",
"key_signature": null,
"revoke_key_id": null,
"description": null,
"name": null,
"image_version": null,
"applied_labels": null
}
]
}
*****************************
Deletes a device image
*****************************
.. rest_method:: DELETE /v1/device_images/{image_id}
**Normal response codes**
204
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"image_id", "URI", "csapi:UUID", "The unique identifier of a device image."
This operation does not accept a request body.
--------------
Device labels
--------------
************************
List the device labels
************************
.. rest_method:: GET /v1/device_labels
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_labels ", "plain", "xsd:list", "The list of device labels."
"uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
"pcidevice_uuid ", "plain", "csapi:UUID", "The universally unique identifier for the pci device object."
"host_uuid ", "plain", "csapi:UUID", "The universally unique identifier for the host object."
"label_key ", "plain", "xsd:string", "The key of the device label."
"label_value ", "plain", "xsd:string", "The value of the device label."
::
{
"device_labels": [
{
"uuid": "fe26ca98-35d4-43b7-8c51-f0ca957b35e1",
"pcidevice_uuid": "64641c6d-4fdd-4ecb-9c66-a68982267b6d",
"host_uuid": "32be8077-1174-46cf-8309-48c107765ffc"
"label_key": "key1",
"label_value": "value1",
},
{
"uuid": "60342a18-a686-48c4-8e71-13a005ffda1b",
"pcidevice_uuid": "9d69d492-9888-4d85-90d0-e52def926b17",
"host_uuid": "32be8077-1174-46cf-8309-48c107765ffc"
"label_key": "key5",
"label_value": "value5",
},
]
}
*************************************
Assign device label to a pci device
*************************************
.. rest_method:: POST /v1/device_labels
**Normal response codes**
200
**Error response codes**
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"pcidevice_uuid", "URI", "csapi:UUID", "The unique identifier of a pci device."
"device_labels", "URI", "xsd:list", "List of key-value paired of device labels."
::
{
"pcidevice_uuid": "da98f600-49cf-4f0e-b14e-15ef91069fe8",
"key1": "value1",
"key2": "value2"
}
**Response parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"uuid", "URI", "csapi:UUID", "The unique identifier of the device label object."
"pcidevice_uuid", "URI", "csapi:UUID", "The unique identifier of a pci device."
"label_key", "URI", "xsd:string", "The label key of device labels."
"label_value", "URI", "xsd:string", "The label value of device labels."
::
{
"device_labels": [
{
"uuid": "66daffb1-72ee-4e6e-9489-206c5eeaec94",
"pcidevice_uuid": "da98f600-49cf-4f0e-b14e-15ef91069fe8",
"label_key": "key1",
"label_value": "value1",
},
{
"uuid": "2e7821ed-e373-4cb8-a47b-f70ff2558dfd",
"pcidevice_uuid": "da98f600-49cf-4f0e-b14e-15ef91069fe8",
"label_key": "key2",
"label_value": "value2",
}
]
}
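A sketch of assigning the labels from the request example above; the host, token, and device UUID are placeholders::

    import requests

    # Assign two labels to one PCI device in a single request.
    resp = requests.post('http://192.168.204.1:6385/v1/device_labels',
                         json={'pcidevice_uuid':
                               'da98f600-49cf-4f0e-b14e-15ef91069fe8',
                               'key1': 'value1',
                               'key2': 'value2'},
                         headers={'X-Auth-Token': 'ADMIN_TOKEN'})
    print(resp.json())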
************************
Deletes a device label
************************
.. rest_method:: DELETE /v1/device_labels/{device_label_uuid}
**Normal response codes**
204
**Request parameters**
.. csv-table::
:header: "Parameter", "Style", "Type", "Description"
:widths: 20, 20, 20, 60
"device_label_uuid", "URI", "csapi:UUID", "The unique identifier of a device label."
This operation does not accept a request body.
------------------
Service Parameter
------------------


+1 -1  config-gate/centos/build_srpm.data

@@ -1,2 +1,2 @@
SRC_DIR="files"
TIS_PATCH_VER=0
TIS_PATCH_VER=PKG_GITREVCOUNT

+1 -1  config-gate/files/config.service

@@ -1,5 +1,5 @@
[Unit]
Description=General TIS config gate
Description=General StarlingX config gate
After=sw-patch.service
Before=serial-getty@ttyS0.service getty@tty1.service
# Each config service must have a Before statement against config.service, to ensure ordering


+1 -1  controllerconfig/centos/build_srpm.data

@@ -1,2 +1,2 @@
SRC_DIR="controllerconfig"
TIS_PATCH_VER=152
TIS_PATCH_VER=PKG_GITREVCOUNT

+3 -2  controllerconfig/centos/controllerconfig.spec

@@ -58,6 +58,7 @@ install -m 644 dist/*.whl $RPM_BUILD_ROOT/wheels/

install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/upgrade_swact_migration.py %{buildroot}%{local_bindir}/upgrade_swact_migration.py

install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -67,7 +68,7 @@ install -p -D -m 755 scripts/controller_config %{buildroot}%{local_etc_initd}/co

# Install Upgrade scripts
install -d -m 755 %{buildroot}%{local_etc_upgraded}
install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
# install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/

install -d -m 755 %{buildroot}%{local_etc_systemd}
install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{local_etc_systemd}/controllerconfig.service
@@ -89,7 +90,7 @@ rm -rf $RPM_BUILD_ROOT
%{local_goenabledd}/*
%{local_etc_initd}/*
%dir %{local_etc_upgraded}
%{local_etc_upgraded}/*
# %{local_etc_upgraded}/*
%{local_etc_systemd}/*

%package wheels


+0 -2  controllerconfig/controllerconfig/controllerconfig/common/constants.py

@@ -18,5 +18,3 @@ KEYRING_WORKDIR = '/tmp/python_keyring'
KEYRING_PERMDIR = tsconfig.KEYRING_PATH

INITIAL_CONFIG_COMPLETE_FILE = '/etc/platform/.initial_config_complete'

BACKUPS_PATH = '/opt/backups'

+125 -62  controllerconfig/controllerconfig/controllerconfig/upgrades/controller.py

@@ -23,7 +23,6 @@ import tempfile
import time
import yaml


from sysinv.common import constants as sysinv_constants


@@ -52,6 +51,7 @@ LOG = log.getLogger(__name__)
POSTGRES_MOUNT_PATH = '/mnt/postgresql'
POSTGRES_DUMP_MOUNT_PATH = '/mnt/db_dump'
DB_CONNECTION_FORMAT = "connection=postgresql://%s:%s@127.0.0.1/%s\n"
DB_BARBICAN_CONNECTION_FORMAT = "postgresql://%s:%s@127.0.0.1/%s"

restore_patching_complete = '/etc/platform/.restore_patching_complete'
restore_compute_ready = '/var/run/.restore_compute_ready'
@@ -103,7 +103,8 @@ def get_db_credentials(shared_services, from_release):


def get_shared_services():
""" Get the list of shared services from the sysinv database """
""" Get the list of shared services from the sysinv database"""

shared_services = []
DEFAULT_SHARED_SERVICES = []

@@ -114,6 +115,7 @@ def get_shared_services():
if row is None:
LOG.error("Failed to fetch i_system data")
raise psycopg2.ProgrammingError("Failed to fetch i_system data")

cap_obj = json.loads(row[0])
region_config = cap_obj.get('region_config', None)
if region_config:
@@ -127,7 +129,10 @@ def get_connection_string(db_credentials, database):
""" Generates a connection string for a given database"""
username = db_credentials[database]['username']
password = db_credentials[database]['password']
return DB_CONNECTION_FORMAT % (username, password, database)
if database == 'barbican':
return DB_BARBICAN_CONNECTION_FORMAT % (username, password, database)
else:
return DB_CONNECTION_FORMAT % (username, password, database)


def create_temp_filesystem(vgname, lvname, mountpoint, size):
@@ -260,6 +265,50 @@ def migrate_pxeboot_config(from_release, to_release):
raise


def migrate_armada_config(from_release, to_release):
""" Migrates armada configuration. """

LOG.info("Migrating armada config")
devnull = open(os.devnull, 'w')

# Copy the entire armada.cfg directory to pick up any changes made
# after the data was migrated (i.e. updates to the controller-1 load).
source_armada = os.path.join(PLATFORM_PATH, "armada", from_release)
dest_armada = os.path.join(PLATFORM_PATH, "armada", to_release)
try:
subprocess.check_call(
["cp",
"-a",
os.path.join(source_armada),
os.path.join(dest_armada)],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to migrate %s" % source_armada)
raise


def migrate_helm_config(from_release, to_release):
""" Migrates helm configuration. """

LOG.info("Migrating helm config")
devnull = open(os.devnull, 'w')

# Copy the entire helm.cfg directory to pick up any changes made
# after the data was migrated (i.e. updates to the controller-1 load).
source_helm = os.path.join(PLATFORM_PATH, "helm", from_release)
dest_helm = os.path.join(PLATFORM_PATH, "helm", to_release)
try:
subprocess.check_call(
["cp",
"-a",
os.path.join(source_helm),
os.path.join(dest_helm)],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed to migrate %s" % source_helm)
raise


def migrate_sysinv_data(from_release, to_release):
""" Migrates sysinv data. """
devnull = open(os.devnull, 'w')
@@ -425,45 +474,44 @@ def create_databases(from_release, to_release, db_credentials):
""" Creates databases. """
LOG.info("Creating new databases")

if from_release == '18.03':
# Create databases that are new in this release

conn = psycopg2.connect('dbname=postgres user=postgres')

# Postgres won't allow transactions around database create operations
# so we set the connection to autocommit
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

databases_to_create = []
if not databases_to_create:
return

with conn:
with conn.cursor() as cur:
for database in databases_to_create:
print("Creating %s database" % database)
username = psycopg2.extensions.AsIs(
'\"%s\"' % db_credentials[database]['username'])
db_name = psycopg2.extensions.AsIs('\"%s\"' % database)
password = db_credentials[database]['password']

try:
# Here we create the new database and the role for it
# The role will be used by the dbsync command to
# connect to the database. This ensures any new tables
# are added with the correct owner
cur.execute('CREATE DATABASE %s', (db_name,))
cur.execute('CREATE ROLE %s', (username,))
cur.execute('ALTER ROLE %s LOGIN PASSWORD %s',
(username, password))
cur.execute('GRANT ALL ON DATABASE %s TO %s',
(db_name, username))
except Exception as ex:
LOG.exception("Failed to create database and role. " +
"(%s : %s) Exception: %s" %
(database, username, ex))
raise
# Create databases that are new in this release

conn = psycopg2.connect('dbname=postgres user=postgres')

# Postgres won't allow transactions around database create operations
# so we set the connection to autocommit
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

databases_to_create = []
if not databases_to_create:
return

with conn:
with conn.cursor() as cur:
for database in databases_to_create:
print("Creating %s database" % database)
username = psycopg2.extensions.AsIs(
'\"%s\"' % db_credentials[database]['username'])
db_name = psycopg2.extensions.AsIs('\"%s\"' % database)
password = db_credentials[database]['password']

try:
# Here we create the new database and the role for it
# The role will be used by the dbsync command to
# connect to the database. This ensures any new tables
# are added with the correct owner
cur.execute('CREATE DATABASE %s', (db_name,))
cur.execute('CREATE ROLE %s', (username,))
cur.execute('ALTER ROLE %s LOGIN PASSWORD %s',
(username, password))
cur.execute('GRANT ALL ON DATABASE %s TO %s',
(db_name, username))
except Exception as ex:
LOG.exception("Failed to create database and role. " +
"(%s : %s) Exception: %s" %
(database, username, ex))
raise


def migrate_sysinv_database():
@@ -497,15 +545,11 @@ def migrate_databases(from_release, shared_services, db_credentials,
f.write("[database]\n")
f.write(get_connection_string(db_credentials, 'keystone'))

with open("/etc/barbican/barbican-dbsync.conf", "w") as f:
f.write("[database]\n")
f.write(get_connection_string(db_credentials, 'barbican'))

migrate_commands = [
# Migrate barbican
('barbican',
'barbican-manage --config-file /etc/barbican/barbican-dbsync.conf ' +
'db upgrade'),
'barbican-manage db upgrade ' +
'--db-url %s' % get_connection_string(db_credentials, 'barbican')),
]

if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
@@ -616,20 +660,19 @@ def migrate_hiera_data(from_release, to_release):
shutil.copy(os.path.join(from_hiera_path, f), to_hiera_path)

# Make any necessary updates to the static yaml files.
if from_release == "18.03":
# Update the static.yaml file
static_file = os.path.join(constants.HIERADATA_PERMDIR, "static.yaml")
with open(static_file, 'r') as yaml_file:
static_config = yaml.load(yaml_file)
static_config.update({
'platform::params::software_version': SW_VERSION,
'platform::client::credentials::params::keyring_directory':
KEYRING_PATH,
'platform::client::credentials::params::keyring_file':
os.path.join(KEYRING_PATH, '.CREDENTIAL'),
})
with open(static_file, 'w') as yaml_file:
yaml.dump(static_config, yaml_file, default_flow_style=False)
# Update the static.yaml file
static_file = os.path.join(constants.HIERADATA_PERMDIR, "static.yaml")
with open(static_file, 'r') as yaml_file:
static_config = yaml.load(yaml_file)
static_config.update({
'platform::params::software_version': SW_VERSION,
'platform::client::credentials::params::keyring_directory':
KEYRING_PATH,
'platform::client::credentials::params::keyring_file':
os.path.join(KEYRING_PATH, '.CREDENTIAL'),
})
with open(static_file, 'w') as yaml_file:
yaml.dump(static_config, yaml_file, default_flow_style=False)


def upgrade_controller(from_release, to_release):
@@ -667,6 +710,14 @@ def upgrade_controller(from_release, to_release):
print("Migrating pxeboot configuration...")
migrate_pxeboot_config(from_release, to_release)

# Migrate armada config
print("Migrating armada configuration...")
migrate_armada_config(from_release, to_release)

# Migrate helm config
print("Migrating helm configuration...")
migrate_helm_config(from_release, to_release)

# Migrate sysinv data.
print("Migrating sysinv configuration...")
migrate_sysinv_data(from_release, to_release)
@@ -768,6 +819,18 @@ def upgrade_controller(from_release, to_release):
LOG.info("Failed to update hiera configuration")
raise

# Prepare for swact
LOG.info("Prepare for swact to controller-1")
try:
subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
'prepare_swact',
from_release,
to_release],
stdout=devnull)
except subprocess.CalledProcessError:
LOG.exception("Failed upgrade_swact_migration prepare_swact")
raise

print("Shutting down upgrade processes...")

# Stop postgres service


+30 -38  controllerconfig/controllerconfig/controllerconfig/upgrades/management.py

@@ -15,7 +15,6 @@ import subprocess

import tsconfig.tsconfig as tsc

from controllerconfig.common import constants
from sysinv.common import constants as sysinv_constants
from controllerconfig.upgrades import utils

@@ -24,34 +23,21 @@ from oslo_log import log
LOG = log.getLogger(__name__)


def get_upgrade_databases(shared_services):
def get_upgrade_databases(system_role, shared_services):

UPGRADE_DATABASES = ('postgres', 'template1', 'nova', 'sysinv',
'ceilometer', 'neutron', 'heat', 'nova_api', 'aodh',
'magnum', 'ironic', 'barbican')
UPGRADE_DATABASES = ('postgres', 'template1', 'sysinv',
'barbican')

UPGRADE_DATABASE_SKIP_TABLES = {'postgres': (), 'template1': (),
'heat': (), 'nova': (), 'nova_api': (),
'sysinv': ('i_alarm',),
'neutron': (),
'aodh': (),
'magnum': (),
'ironic': (),
'barbican': (),
'ceilometer': ('metadata_bool',
'metadata_float',
'metadata_int',
'metadata_text',
'meter', 'sample', 'fault',
'resource')}

if sysinv_constants.SERVICE_TYPE_VOLUME not in shared_services:
UPGRADE_DATABASES += ('cinder',)
UPGRADE_DATABASE_SKIP_TABLES.update({'cinder': ()})

if sysinv_constants.SERVICE_TYPE_IMAGE not in shared_services:
UPGRADE_DATABASES += ('glance',)
UPGRADE_DATABASE_SKIP_TABLES.update({'glance': ()})
'barbican': ()}

if system_role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
UPGRADE_DATABASES += ('dcmanager', 'dcorch',)
UPGRADE_DATABASE_SKIP_TABLES.update({
'dcmanager': ('subcloud_alarms',),
'dcorch': ()
})

if sysinv_constants.SERVICE_TYPE_IDENTITY not in shared_services:
UPGRADE_DATABASES += ('keystone',)
@@ -60,12 +46,12 @@ def get_upgrade_databases(shared_services):
return UPGRADE_DATABASES, UPGRADE_DATABASE_SKIP_TABLES


def export_postgres(dest_dir, shared_services):
def export_postgres(dest_dir, system_role, shared_services):
""" Export postgres databases """
devnull = open(os.devnull, 'w')
try:
upgrade_databases, upgrade_database_skip_tables = \
get_upgrade_databases(shared_services)
get_upgrade_databases(system_role, shared_services)
# Dump roles, table spaces and schemas for databases.
subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' +
'--schema-only > %s/%s' %
@@ -121,7 +107,7 @@ def prepare_upgrade(from_load, to_load, i_system):

# Export databases
shared_services = i_system.capabilities.get("shared_services", "")
export_postgres(dest_dir, shared_services)
export_postgres(dest_dir, i_system.distributed_cloud_role, shared_services)
export_vim(dest_dir)

# Export filesystems so controller-1 can access them
@@ -197,9 +183,18 @@ def create_simplex_backup(software_upgrade):
with open(metadata_filename, 'w') as metadata_file:
metadata_file.write(json_data)

# TODO: Switch this over to use Ansible
# backup_filename = get_upgrade_backup_filename(software_upgrade)
# backup_restore.backup(backup_filename, constants.BACKUPS_PATH)
backup_filename = get_upgrade_backup_filename(software_upgrade)
backup_vars = "platform_backup_file=%s.tgz backup_dir=%s" % (
backup_filename, tsc.PLATFORM_BACKUP_PATH)
args = [
'ansible-playbook',
'-e', backup_vars,
sysinv_constants.ANSIBLE_PLATFORM_BACKUP_PLAYBOOK]
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
out, _ = proc.communicate()
LOG.info(out)
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, args)
LOG.info("Create simplex backup complete")


@@ -254,15 +249,16 @@ def abort_upgrade(from_load, to_load, upgrade):
# Remove upgrade directories
upgrade_dirs = [
os.path.join(tsc.PLATFORM_PATH, "config", to_load),
os.path.join(tsc.PLATFORM_PATH, "armada", to_load),
os.path.join(tsc.PLATFORM_PATH, "helm", to_load),
os.path.join(tsc.ETCD_PATH, to_load),
os.path.join(utils.POSTGRES_PATH, "upgrade"),
os.path.join(utils.POSTGRES_PATH, to_load),
os.path.join(utils.RABBIT_PATH, to_load),
os.path.join(tsc.PLATFORM_PATH, "ironic", to_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", to_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", to_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", to_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", to_load),
os.path.join(tsc.PLATFORM_PATH, "ceilometer", to_load),
os.path.join(tsc.CONFIG_PATH, 'upgrades')
]

@@ -274,7 +270,7 @@ def abort_upgrade(from_load, to_load, upgrade):

simplex_backup_filename = get_upgrade_backup_filename(upgrade) + "*"
simplex_backup_files = glob.glob(os.path.join(
constants.BACKUPS_PATH, simplex_backup_filename))
tsc.PLATFORM_BACKUP_PATH, simplex_backup_filename))

for file in simplex_backup_files:
try:
@@ -328,16 +324,12 @@ def complete_upgrade(from_load, to_load):
os.path.join(utils.POSTGRES_PATH, "upgrade"),
os.path.join(utils.POSTGRES_PATH, from_load),
os.path.join(utils.RABBIT_PATH, from_load),
os.path.join(tsc.PLATFORM_PATH, "ironic", from_load),
os.path.join(tsc.PLATFORM_PATH, "nfv/vim", from_load),
os.path.join(tsc.PLATFORM_PATH, ".keyring", from_load),
os.path.join(tsc.PLATFORM_PATH, "puppet", from_load),
os.path.join(tsc.PLATFORM_PATH, "sysinv", from_load),
]

upgrade_dirs.append(
os.path.join(tsc.PLATFORM_PATH, "ceilometer", from_load))

for directory in upgrade_dirs:
try:
shutil.rmtree(directory)


+24 -0  controllerconfig/controllerconfig/scripts/controller_config

@@ -319,6 +319,30 @@ start()
fi
fi

if [ -e $CONFIG_DIR/admin-ep-cert.pem ]
then
cp $CONFIG_DIR/admin-ep-cert.pem /etc/ssl/private/
if [ $? -ne 0 ]
then
fatal_error "Unable to copy $CONFIG_DIR/admin-ep-cert.pem to certificates dir"
fi
fi

if [ -e $CONFIG_DIR/dc-adminep-root-ca.crt ]
then
cp $CONFIG_DIR/dc-adminep-root-ca.crt /etc/pki/ca-trust/source/anchors/
if [ $? -ne 0 ]
then
fatal_error "Unable to copy $CONFIG_DIR/dc-adminep-root-ca.crt to certificates dir"
fi
# Update system trusted CA cert list with the new CA cert.
update-ca-trust extract
if [ $? -ne 0 ]
then
fatal_error "Unable to update system trusted CA certificate list"
fi
fi

if [ -e $CONFIG_DIR/openstack ]
then
if [ ! -e /etc/ssl/private/openstack ]


+95 -0  controllerconfig/controllerconfig/scripts/upgrade_swact_migration.py

@@ -0,0 +1,95 @@
#!/usr/bin/python
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will perform upgrade preparation and migration operations for
# host-swact to controller-1.
#

import os
import shutil
import subprocess
import sys
import yaml

from oslo_log import log

LOG = log.getLogger(__name__)

ETCD_PATH = "/opt/etcd"
UPGRADE_CONTROLLER_1_FILE = "/etc/platform/.upgrade_swact_controller_1"


def main():
action = None
from_release = None
to_release = None
arg = 1

while arg < len(sys.argv):
if arg == 1:
action = sys.argv[arg]
elif arg == 2:
from_release = sys.argv[arg]
elif arg == 3:
to_release = sys.argv[arg]
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1

if action == "migrate_etcd":
try:
migrate_etcd_on_swact()
except Exception as ex:
LOG.exception(ex)
return 1
elif action == "prepare_swact":
upgrade_prepare_swact(from_release, to_release)
return 0


def upgrade_prepare_swact(from_release, to_release):
migrate_data = {
'from_release': from_release,
'to_release': to_release
}
with open(UPGRADE_CONTROLLER_1_FILE, 'w') as f:
yaml.dump(migrate_data, f, default_flow_style=False)


def migrate_etcd_on_swact():
with open(UPGRADE_CONTROLLER_1_FILE, 'r') as f:
document = yaml.safe_load(f)

from_release = document.get('from_release')
to_release = document.get('to_release')

dest_etcd = os.path.join(ETCD_PATH, to_release)

if os.path.exists(dest_etcd):
# The dest_etcd must not have already been created,
# however this can occur on a forced host-swact
LOG.info("skipping etcd migration %s already exists" %
dest_etcd)
return

if not os.path.isfile(UPGRADE_CONTROLLER_1_FILE):
LOG.info("skipping etcd migration, no request %s" %
UPGRADE_CONTROLLER_1_FILE)
return

source_etcd = os.path.join(ETCD_PATH, from_release)
try:
shutil.copytree(os.path.join(source_etcd),
os.path.join(dest_etcd))
os.remove(UPGRADE_CONTROLLER_1_FILE)
except subprocess.CalledProcessError:
LOG.exception("Failed to migrate %s" % source_etcd)
raise


if __name__ == "__main__":
sys.exit(main())
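How the two actions fit together, as a sketch: ``prepare_swact`` is invoked from ``upgrade_controller()`` in ``upgrades/controller.py`` above with the release pair, and ``migrate_etcd`` is expected to run on the subsequent swact to controller-1, reading the release pair back from the flag file. The release numbers here are illustrative::

    import subprocess

    # During the upgrade of controller-1: record the release pair so a
    # later swact knows an etcd migration is pending.
    subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
                           'prepare_swact', '19.12', '20.01'])

    # On the subsequent swact to controller-1: copy /opt/etcd/<from_release>
    # to /opt/etcd/<to_release> and remove the flag file.
    subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
                           'migrate_etcd'])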

+0 -133  controllerconfig/controllerconfig/upgrade-scripts/16-neutron-move-bindings-off-controller-1.py

@@ -1,133 +0,0 @@
#!/usr/bin/python3
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will remove all neutron bindings from controller-1.
# This is necessary to match the behaviour on controller-1 after
# the host is locked.
# This should be removed once we support data migration upon a
# swact to controller-1 during an upgrade.
import psycopg2

import sys

from psycopg2.extras import RealDictCursor
from oslo_log import log

LOG = log.getLogger(__name__)


def main():
action = None
from_release = None
to_release = None # noqa
arg = 1
while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1

log.configure()

if from_release == "18.03" and action == "migrate":
try:
move_routers_off_controller_1()
move_networks_off_controller_1()
move_port_bindings_off_controller_1()
move_dhcp_port_device_id_off_controller_1()
move_distributed_port_bindings_off_controller_1()
except Exception as ex:
LOG.exception(ex)
print(ex)
return 1


def run_cmd_postgres(cmd):
"""
This executes the given command as user postgres. This is necessary when
this script is run as root, which is the case on an upgrade activation.
"""
neutron_conn = psycopg2.connect("dbname=neutron user=postgres")
with neutron_conn:
with neutron_conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute(cmd)
LOG.info("Executing '%s'" % cmd)


def move_routers_off_controller_1():
"""
This function moves all routers hosted on controller-1 to controller-0.
This is required to match the DB state after controller-1 is locked as
part of the upgrade, at which point they will be automatically rescheduled.
"""
cmd = ("UPDATE routerl3agentbindings SET l3_agent_id="
"(SELECT id FROM agents WHERE agent_type='L3 agent'"
" AND host='controller-0') WHERE l3_agent_id IN"
" (SELECT id FROM agents WHERE agent_type='L3 agent'"
" AND host='controller-1') AND (SELECT count(id)"
" FROM agents WHERE agent_type='L3 agent'"
" AND host='controller-0')=1;")
run_cmd_postgres(cmd)


def move_networks_off_controller_1():
"""
This function moves all dhcp bindings from controller-1 to controller-0.
This is required to match the DB state after controller-1 is locked as
part of the upgrade, at which point they will be automatically rescheduled.
"""
cmd = ("UPDATE networkdhcpagentbindings SET dhcp_agent_id="
"(SELECT id FROM agents WHERE agent_type='DHCP agent'"
" AND host='controller-0') WHERE dhcp_agent_id IN"
" (SELECT id FROM agents WHERE agent_type='DHCP agent'"
" AND host='controller-1') AND (SELECT count(id)"
" FROM agents WHERE agent_type='DHCP agent'"
" AND host='controller-0')=1;")
run_cmd_postgres(cmd)


def move_dhcp_port_device_id_off_controller_1():
"""
This function updates all dhcp ports' device IDs bound to controller-0
over to controller-1. Note that because the prefix is based on hostname,
this prefix is constant for both controllers.
controller-0: "dhcpaebe17f8-776d-5ab6-9a5f-e9bdeeaca66f"
controller-1: "dhcpf42f2830-b2ec-5a2c-93f3-e3e3328e20a3"
"""
cmd = ("UPDATE ports SET device_id ="
" REPLACE(device_id,"
" 'dhcpf42f2830-b2ec-5a2c-93f3-e3e3328e20a3',"
" 'dhcpaebe17f8-776d-5ab6-9a5f-e9bdeeaca66f')"
" WHERE device_owner = 'network:dhcp';")
run_cmd_postgres(cmd)


def move_port_bindings_off_controller_1():
"""
This function moves all port bindings from controller-1 to controller-0.
"""
cmd = ("UPDATE ml2_port_bindings SET host='controller-0'"
" WHERE host='controller-1';")
run_cmd_postgres(cmd)


def move_distributed_port_bindings_off_controller_1():
"""
This function deletes all ml2_distributed_port_bindings on controller-1.
"""
cmd = ("DELETE FROM ml2_distributed_port_bindings"
" WHERE host='controller-1';")
run_cmd_postgres(cmd)


if __name__ == "__main__":
sys.exit(main())

+0 -104  controllerconfig/controllerconfig/upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py

@@ -1,104 +0,0 @@
#!/usr/bin/python3
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will update the storage backends for controller-1.
#

import json
import psycopg2
import sys

from sysinv.common import constants
from psycopg2.extras import RealDictCursor
from oslo_log import log

LOG = log.getLogger(__name__)

# Sections that need to be removed from retired Ceph cache tiering feature
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER = 'cache_tiering'
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_DESIRED = 'cache_tiering.desired'
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_APPLIED = 'cache_tiering.applied'


def main():
action = None
from_release = None
to_release = None # noqa
arg = 1

while arg < len(sys.argv):
if arg == 1:
from_release = sys.argv[arg]
elif arg == 2:
to_release = sys.argv[arg] # noqa
elif arg == 3:
action = sys.argv[arg]
else:
print("Invalid option %s." % sys.argv[arg])
return 1
arg += 1

log.configure()

if from_release == "18.03" and action == "migrate":
try:
cleanup_ceph_cache_tiering_service_parameters(from_release)
cleanup_ceph_personality_subtype(from_release)
except Exception as ex:
LOG.exception(ex)
return 1


def cleanup_ceph_cache_tiering_service_parameters(from_release):
conn = psycopg2.connect("dbname=sysinv user=postgres")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
for s in [SERVICE_PARAM_SECTION_CEPH_CACHE_TIER,
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_DESIRED,
SERVICE_PARAM_SECTION_CEPH_CACHE_TIER_APPLIED]:
cur.execute("select * from service_parameter where service=%s "
"and section=%s", (constants.SERVICE_TYPE_CEPH,
s,))
parameters = cur.fetchall()
if not parameters:
LOG.info("No service_parameter data for section %s "
"found." % s)
continue

for p in parameters:
LOG.debug("Found %s/%s" % (p['section'], p['name']))

LOG.info("Removing ceph service parameters from section "
"%s" % s)
cur.execute("delete from service_parameter where service=%s "
"and section=%s", (constants.SERVICE_TYPE_CEPH,
s,))


def cleanup_ceph_personality_subtype(from_release):
conn = psycopg2.connect("dbname=sysinv user=postgres")
with conn:
with conn.cursor(cursor_factory=RealDictCursor) as cur:
cur.execute("select hostname, capabilities from i_host")
parameters = cur.fetchall()
if not parameters:
LOG.info("No capabilities data found ")
return

for p in parameters:
LOG.debug("Found host capabilities %s/%s" %
(p['hostname'], p['capabilities']))
json_dict = json.loads(p['capabilities'])
if 'pers_subtype' in json_dict:
del json_dict['pers_subtype']

LOG.info("Removing ceph pers_subtype from capabilities")
cur.execute("update i_host set capabilities='%s';" %
json.dumps(json_dict))


if __name__ == "__main__":
sys.exit(main())

+3 -2  controllerconfig/opensuse/controllerconfig.spec

@@ -56,6 +56,7 @@ Configuration for the Controller node.

install -d -m 755 %{buildroot}%{local_bindir}
install -p -D -m 700 scripts/openstack_update_admin_password %{buildroot}%{local_bindir}/openstack_update_admin_password
install -p -D -m 700 scripts/upgrade_swact_migration.py %{buildroot}%{local_bindir}/upgrade_swact_migration.py

install -d -m 755 %{buildroot}%{local_goenabledd}
install -p -D -m 700 scripts/config_goenabled_check.sh %{buildroot}%{local_goenabledd}/config_goenabled_check.sh
@@ -65,7 +66,7 @@ install -p -D -m 755 scripts/controller_config %{buildroot}%{local_etc_initd}/co

# Install Upgrade scripts
install -d -m 755 %{buildroot}%{local_etc_upgraded}
install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/
# install -p -D -m 755 upgrade-scripts/* %{buildroot}%{local_etc_upgraded}/

install -p -D -m 664 scripts/controllerconfig.service %{buildroot}%{_unitdir}/controllerconfig.service

@@ -96,7 +97,7 @@ rm -rf $RPM_BUILD_ROOT
%{local_goenabledd}/*
%{local_etc_initd}/*
%dir %{local_etc_upgraded}
%{local_etc_upgraded}/*
# %{local_etc_upgraded}/*
%{_unitdir}/*

#%%package wheels


+1 -1  storageconfig/centos/build_srpm.data

@@ -1,2 +1,2 @@
SRC_DIR="storageconfig"
TIS_PATCH_VER=6
TIS_PATCH_VER=PKG_GITREVCOUNT

+1 -1  sysinv/cgts-client/centos/build_srpm.data

@@ -1,2 +1,2 @@
SRC_DIR="cgts-client"
TIS_PATCH_VER=75
TIS_PATCH_VER=PKG_GITREVCOUNT

+1 -0  sysinv/cgts-client/centos/cgts-client.spec

@@ -21,6 +21,7 @@ Requires: python3-keystoneclient
Requires: python3-oslo-i18n
Requires: python3-oslo-serialization
Requires: python3-oslo-utils
Requires: python3-requests-toolbelt
# Needed for python2 and python3 compatible
Requires: python3-six



+1 -1  sysinv/cgts-client/cgts-client/cgtsclient/client.py

@@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


+5 -0  sysinv/cgts-client/cgts-client/cgtsclient/common/base.py

@@ -57,6 +57,11 @@ class Manager(object):
'POST', url, body=body, data=data)
return resp

def _upload_multipart(self, url, body, data=None):
resp = self.api.upload_request_with_multipart(
'POST', url, body=body, data=data)
return resp

def _json_get(self, url, body=None):
"""send a GET request and return a json serialized object"""
_, body = self.api.json_request('GET', url, body=body)


+15 -2  sysinv/cgts-client/cgts-client/cgtsclient/common/http.py

@@ -15,13 +15,13 @@
# under the License.
#

import httplib2
import logging
import os
import requests
from requests_toolbelt import MultipartEncoder
import socket

import httplib2

import six
from six.moves.urllib.parse import urlparse

@@ -293,6 +293,19 @@ class HTTPClient(httplib2.Http):
data=data)
return req.json()

def upload_request_with_multipart(self, method, url, **kwargs):
self.authenticate_and_fetch_endpoint_url()
connection_url = self._get_connection_url(url)
fields = kwargs.get('data')
fields['file'] = (kwargs['body'], open(kwargs['body'], 'rb'))
enc = MultipartEncoder(fields)
headers = {'Content-Type': enc.content_type,
"X-Auth-Token": self.auth_token}
req = requests.post(connection_url,
data=enc,
headers=headers)
return req.json()

#################
# AUTHENTICATE
#################
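For clarity, the essence of the new upload path restated as a standalone snippet; in the client the connection URL comes from ``_get_connection_url()`` and the token from ``self.auth_token``, so both are placeholders here::

    import requests
    from requests_toolbelt import MultipartEncoder

    def upload_multipart(connection_url, auth_token, filepath, data):
        # Attach the file as a streamed part so large device images are
        # never held fully in memory; other form fields pass through as-is.
        fields = dict(data)
        fields['file'] = (filepath, open(filepath, 'rb'))
        enc = MultipartEncoder(fields)
        headers = {'Content-Type': enc.content_type,
                   'X-Auth-Token': auth_token}
        return requests.post(connection_url, data=enc,
                             headers=headers).json()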


+123 -0  sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_controllerfs.py

@@ -0,0 +1,123 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import copy
import testtools

from cgtsclient.tests import utils
import cgtsclient.v1.controller_fs

CONTROLLER_FS = {
    'uuid': '66666666-7777-8888-9999-000000000000',
    'name': 'cfs',
    'size': 10,
    'logical_volume': 'cfs-lv',
    'replicated': True,
    'state': 'available'
}

UPDATED_CONTROLLER_FS = copy.deepcopy(CONTROLLER_FS)
NEW_SIZE = 20
UPDATED_CONTROLLER_FS['size'] = NEW_SIZE
SYSTEM_UUID = "11111111-2222-3333-4444-5555-000000000000"

fixtures = {
    '/v1/controller_fs':
    {
        'GET': (
            {},
            {"controller_fs": [CONTROLLER_FS]},
        ),
    },
    '/v1/controller_fs/%s' % CONTROLLER_FS['uuid']:
    {
        'GET': (
            {},
            CONTROLLER_FS,
        ),
        'PATCH': (
            {},
            UPDATED_CONTROLLER_FS,
        ),
    },
    '/v1/isystems/%s/controller_fs/update_many' % SYSTEM_UUID:
    {
        'PUT': (
            {},
            {},
        ),
    },
}


class ControllerFsManagerTest(testtools.TestCase):

    def setUp(self):
        super(ControllerFsManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = cgtsclient.v1.controller_fs.ControllerFsManager(self.api)

    def test_controller_fs_list(self):
        controllerfs = self.mgr.list()
        expect = [
            ('GET', '/v1/controller_fs', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(controllerfs), 1)

    def test_controller_fs_show(self):
        controllerfs = self.mgr.get(CONTROLLER_FS['uuid'])
        expect = [
            ('GET', '/v1/controller_fs/%s' % CONTROLLER_FS['uuid'], {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(controllerfs.uuid, CONTROLLER_FS['uuid'])

    def test_controller_fs_update(self):
        patch = [
            {
                'op': 'replace',
                'value': NEW_SIZE,
                'path': '/size'
            },
            {
                'op': 'replace',
                'value': CONTROLLER_FS['name'],
                'path': '/name'
            }
        ]
        controllerfs = self.mgr.update(CONTROLLER_FS['uuid'], patch)
        expect = [
            ('PATCH', '/v1/controller_fs/%s' % CONTROLLER_FS['uuid'], {}, patch),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(controllerfs.size, NEW_SIZE)

    def test_controller_fs_update_many(self):
        # One patch is a list of two dictionaries;
        # for update_many, this is a list of lists.
        patches = [
            [
                {
                    'op': 'replace',
                    'value': NEW_SIZE,
                    'path': '/size'
                },
                {
                    'op': 'replace',
                    'value': CONTROLLER_FS['name'],
                    'path': '/name'
                }
            ]
        ]
        self.mgr.update_many(SYSTEM_UUID, patches)
        expect = [
            ('PUT', '/v1/isystems/%s/controller_fs/update_many' % SYSTEM_UUID, {}, patches),
        ]

        # Since update_many is just a PUT, we don't expect any output from it,
        # so we can't do a proper assert here. We just check that the request
        # made is the one we expected.
        self.assertEqual(self.api.calls, expect)

sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_controllerfs_shell.py (+131 -0)

@@ -0,0 +1,131 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import copy
import mock

from cgtsclient.tests import test_shell
from cgtsclient.v1.controller_fs import ControllerFs
from cgtsclient.v1.isystem import isystem

FAKE_CONTROLLER_FS = {
    'uuid': '66666666-7777-8888-9999-000000000000',
    'name': 'fake',
    'size': 10,
    'logical_volume': 'fake-lv',
    'replicated': True,
    'state': 'available',
    'created_at': None,
    'updated_at': None
}

FAKE_ISYSTEM = {
    'uuid': '11111111-2222-3333-4444-5555-000000000000'
}

MODIFY_CONTROLLER_FS = copy.deepcopy(FAKE_CONTROLLER_FS)
MODIFY_CONTROLLER_FS['size'] = 15
MODIFY_CONTROLLER_FS['state'] = 'drbd_fs_resizing_in_progress'


class ControllerFsTest(test_shell.ShellTest):

    def setUp(self):
        super(ControllerFsTest, self).setUp()

        # Mock the client
        p = mock.patch('cgtsclient.client._get_endpoint')
        self.mock_cgtsclient_client_get_endpoint = p.start()
        self.mock_cgtsclient_client_get_endpoint.return_value = \
            'http://fakelocalhost:6385/v1'
        self.addCleanup(p.stop)
        p = mock.patch('cgtsclient.client._get_ksclient')
        self.mock_cgtsclient_client_get_ksclient = p.start()
        self.addCleanup(p.stop)

        # Mock the ControllerFsManager
        self.controller_fs_manager_list_result = [
            ControllerFs(None, FAKE_CONTROLLER_FS, True)]

        def mock_controller_fs_manager_list(obj):
            return self.controller_fs_manager_list_result
        self.mocked_controller_fs_manager_list = mock.patch(
            'cgtsclient.v1.controller_fs.ControllerFsManager.list',
            mock_controller_fs_manager_list)
        self.mocked_controller_fs_manager_list.start()
        self.addCleanup(self.mocked_controller_fs_manager_list.stop)

        self.controller_fs_manager_get_result = \
            ControllerFs(None, FAKE_CONTROLLER_FS, True)

        def mock_controller_fs_manager_get(obj):
            return self.controller_fs_manager_get_result
        self.mocked_controller_fs_manager_get = mock.patch(
            'cgtsclient.v1.controller_fs.ControllerFsManager.get',
            mock_controller_fs_manager_get)
        self.mocked_controller_fs_manager_get.start()
        self.addCleanup(self.mocked_controller_fs_manager_get.stop)

        def mock_controller_fs_manager_update_many(obj, system_uuid, patch_list):
            return None

        self.mocked_controller_fs_manager_update_many = mock.patch(
            'cgtsclient.v1.controller_fs.ControllerFsManager.update_many',
            mock_controller_fs_manager_update_many)
        self.mocked_controller_fs_manager_update_many.start()
        self.addCleanup(self.mocked_controller_fs_manager_update_many.stop)

        # Mock isystemManager
        self.isystem_manager_list_result = [
            isystem(None, FAKE_ISYSTEM, None)]

        def mock_isystem_manager_list(obj):
            return self.isystem_manager_list_result

        self.mocked_isystem_manager_list = mock.patch(
            'cgtsclient.v1.isystem.isystemManager.list',
            mock_isystem_manager_list)
        self.mocked_isystem_manager_list.start()
        self.addCleanup(self.mocked_isystem_manager_list.stop)

    def test_controller_fs_list(self):
        self.make_env()

        results = self.shell("controllerfs-list --nowrap")

        self.assertIn(str(FAKE_CONTROLLER_FS['uuid']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['name']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['size']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['logical_volume']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['replicated']), results)
        self.assertIn(str(FAKE_CONTROLLER_FS['state']), results)

    def test_controller_fs_show(self):
        self.make_env()

        result = self.shell("controllerfs-show fake")
        self.assertIn(str(FAKE_CONTROLLER_FS['uuid']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['name']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['size']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['logical_volume']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['replicated']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['state']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['created_at']), result)
        self.assertIn(str(FAKE_CONTROLLER_FS['updated_at']), result)

    def test_controller_fs_modify(self):
        self.make_env()
        self.controller_fs_manager_list_result = [
            ControllerFs(None, MODIFY_CONTROLLER_FS, True)]

        results = self.shell("controllerfs-modify fake=15")

        self.assertIn(str(MODIFY_CONTROLLER_FS['uuid']), results)
        self.assertIn(str(MODIFY_CONTROLLER_FS['name']), results)
        self.assertIn(str(MODIFY_CONTROLLER_FS['size']), results)
        self.assertIn(str(MODIFY_CONTROLLER_FS['logical_volume']), results)
        self.assertIn(str(MODIFY_CONTROLLER_FS['replicated']), results)
        self.assertIn(str(MODIFY_CONTROLLER_FS['state']), results)

sysinv/cgts-client/cgts-client/cgtsclient/v1/client.py (+7 -1)

@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-# Copyright (c) 2013-2019 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
#


@@ -26,6 +26,9 @@ from cgtsclient.v1 import certificate
from cgtsclient.v1 import cluster
from cgtsclient.v1 import controller_fs
from cgtsclient.v1 import datanetwork
+from cgtsclient.v1 import device_image
+from cgtsclient.v1 import device_image_state
+from cgtsclient.v1 import device_label
from cgtsclient.v1 import drbdconfig
from cgtsclient.v1 import ethernetport
from cgtsclient.v1 import fernet
@@ -165,3 +168,6 @@ class Client(http.HTTPClient):
        self.kube_version = kube_version.KubeVersionManager(self)
        self.kube_upgrade = kube_upgrade.KubeUpgradeManager(self)
        self.kube_host_upgrade = kube_host_upgrade.KubeHostUpgradeManager(self)
+        self.device_image = device_image.DeviceImageManager(self)
+        self.device_image_state = device_image_state.DeviceImageStateManager(self)
+        self.device_label = device_label.DeviceLabelManager(self)

sysinv/cgts-client/cgts-client/cgtsclient/v1/controller_fs_shell.py (+28 -8)

@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2017 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -39,6 +39,13 @@ def _print_controller_fs_show(controller_fs):
           action='append',
           default=[],
           help="Modify controller filesystem sizes")
+@utils.arg('--column',
+           action='append',
+           default=[],
+           help="Specify the column(s) to include, can be repeated")
+@utils.arg('--format',
+           choices=['table', 'yaml', 'value'],
+           help="specify the output format, defaults to table")
def do_controllerfs_modify(cc, args):
    """Modify controller filesystem sizes."""

@@ -59,7 +66,7 @@ def do_controllerfs_modify(cc, args):
    except exc.HTTPNotFound:
        raise exc.CommandError('Failed to modify controller filesystems')

-    _print_controllerfs_list(cc)
+    _print_controllerfs_list(cc, args)


@utils.arg('name',
@@ -72,15 +79,28 @@ def do_controllerfs_show(cc, args):
    _print_controller_fs_show(controller_fs)


-def _print_controllerfs_list(cc):
+def _print_controllerfs_list(cc, args):
    controller_fs_list = cc.controller_fs.list()

-    field_labels = ['UUID', 'FS Name', 'Size in GiB', 'Logical Volume',
-                    'Replicated', 'State']
-    fields = ['uuid', 'name', 'size', 'logical_volume', 'replicated', 'state']
-    utils.print_list(controller_fs_list, fields, field_labels, sortby=1)
+    if args.column:
+        fields = args.column
+        field_labels = args.column
+    else:
+        field_labels = ['UUID', 'FS Name', 'Size in GiB', 'Logical Volume',
+                        'Replicated', 'State']
+        fields = ['uuid', 'name', 'size', 'logical_volume', 'replicated', 'state']

+    utils.print_list(controller_fs_list, fields, field_labels,
+                     sortby=0, output_format=args.format)


+@utils.arg('--column',
+           action='append',
+           default=[],
+           help="Specify the column(s) to include, can be repeated")
+@utils.arg('--format',
+           choices=['table', 'yaml', 'value'],
+           help="specify the output format, defaults to table")
def do_controllerfs_list(cc, args):
    """Show list of controller filesystems"""
-    _print_controllerfs_list(cc)
+    _print_controllerfs_list(cc, args)

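Note: with the new flags the list output can be trimmed and reformatted, for
example (assuming the usual "system" CLI wrapper around cgtsclient):

    system controllerfs-list --column name --column size --format value
    system controllerfs-list --format yaml

--column may be repeated; when it is given, the raw field names also serve as
the column labels, as the if-branch in _print_controllerfs_list above shows.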
sysinv/cgts-client/cgts-client/cgtsclient/v1/device_image.py (+81 -0)

@@ -0,0 +1,81 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from cgtsclient.common import base
from cgtsclient.common import utils
from cgtsclient import exc


CREATIO