Merge remote-tracking branch 'origin/master' into f/centos8
Also replace python shebangs in setup section for controllerconfig.spec.

Change-Id: I9b2942b529d9375881efb1ecb15cc846ee64e0b4
Signed-off-by: Charles Short <charles.short@windriver.com>
commit 75758b37a5
.zuul.yaml
@@ -12,6 +12,8 @@
        - sysinv-tox-flake8
        - sysinv-tox-pylint
        - sysinv-tox-bandit
        - controllerconfig-tox-py27
        - controllerconfig-tox-py36
        - controllerconfig-tox-flake8
        - controllerconfig-tox-pylint
        - cgtsclient-tox-py27
@@ -26,6 +28,8 @@
        - sysinv-tox-flake8
        - sysinv-tox-pylint
        - sysinv-tox-bandit
        - controllerconfig-tox-py27
        - controllerconfig-tox-py36
        - controllerconfig-tox-flake8
        - controllerconfig-tox-pylint
        - cgtsclient-tox-py27
@@ -108,6 +112,32 @@
      tox_envlist: bandit
      tox_extra_args: -c sysinv/sysinv/sysinv/tox.ini

- job:
    name: controllerconfig-tox-py27
    parent: tox
    description: Run py27 tests for controllerconfig
    nodeset: ubuntu-xenial
    required-projects:
      - starlingx/fault
    files:
      - controllerconfig/*
    vars:
      tox_envlist: py27
      tox_extra_args: -c controllerconfig/controllerconfig/tox.ini

- job:
    name: controllerconfig-tox-py36
    parent: tox
    description: Run py36 tests for controllerconfig
    nodeset: ubuntu-bionic
    required-projects:
      - starlingx/fault
    files:
      - controllerconfig/*
    vars:
      tox_envlist: py36
      tox_extra_args: -c controllerconfig/controllerconfig/tox.ini

- job:
    name: controllerconfig-tox-flake8
    parent: tox
@@ -525,6 +525,8 @@ itemNotFound (404)
   "timezone (Optional)", "plain", "xsd:string", "The timezone of the cloud system."
   "description (Optional)", "plain", "xsd:string", "A user-specified description of the cloud system."
   "location (Optional)", "plain", "xsd:string", "The user-specified location of the cloud system."
   "latitude (Optional)", "plain", "xsd:string", "The user-specified latitude GPS coordinate of the cloud system."
   "longitude (Optional)", "plain", "xsd:string", "The user-specified longitude GPS coordinate of the cloud system."
   "capabilities (Optional)", "plain", "xsd:dictionary", "System capabilities. <ul><li>sdn_enabled : (Boolean) Software Defined Networking enabled. </li><li>region_config : (Boolean) region selection: <ul><li>true : Secondary region. </li><li>false : Primary region. </li></ul></li><li>shared_services : Services provided by Primary region. </li><li>bm_region : Board Management controller network selection: <ul><li>External : OAM network. </li><li>Internal : Management network. </li></ul></li><li>cinder_backend : backend selection for Cinder. </li><li>vswitch_type : vSwitch selection. </li><li>security_feature : Selection of Spectre and Meltdown mitigation options. </li><li>https_enabled : (Boolean) selection of https mode for public URLs. </li></ul>"
   "contact (Optional)", "plain", "xsd:string", "The user-specified contact for the cloud system."
   "software_version (Optional)", "plain", "xsd:string", "Contains the Cloud Server Software Version and the Software Version of the underlying Linux Kernel."
@@ -555,6 +557,8 @@ itemNotFound (404)
   "updated_at": "2014-09-24T14:35:38.091392+00:00",
   "contact": null,
   "location": null,
   "latitude": null,
   "longitude": null,
   "description": "The Ottawa Cloud Test Lab.",
   "system_type": "Standard",
   "system_mode": "duplex",
@@ -591,6 +595,10 @@ The attributes of the System object that are modifiable are:

- location,

- latitude,

- longitude,

- sdn_enabled,

- contact.
@@ -616,6 +624,8 @@ badMediaType (415)
   "timezone (Optional)", "plain", "xsd:string", "The timezone of the cloud system."
   "description (Optional)", "plain", "xsd:string", "A user-specified description of the cloud system."
   "location (Optional)", "plain", "xsd:string", "The user-specified location of the cloud system."
   "latitude (Optional)", "plain", "xsd:string", "The user-specified latitude GPS coordinate of the cloud system."
   "longitude (Optional)", "plain", "xsd:string", "The user-specified longitude GPS coordinate of the cloud system."
   "capabilities (Optional)", "plain", "xsd:dictionary", "System capabilities. <ul><li>sdn_enabled : (Boolean) Software Defined Networking enabled. </li><li>region_config : (Boolean) region selection: <ul><li>true : Secondary region. </li><li>false : Primary region. </li></ul></li><li>shared_services : Services provided by Primary region. </li><li>bm_region : Board Management controller network selection: <ul><li>External : OAM network. </li><li>Internal : Management network. </li></ul></li><li>cinder_backend : backend selection for Cinder. </li><li>vswitch_type : vSwitch selection. </li><li>security_feature : Selection of Spectre and Meltdown mitigation options. </li><li>https_enabled : (Boolean) selection of https mode for public URLs. </li></ul>"
   "contact (Optional)", "plain", "xsd:string", "The user-specified contact for the cloud system."
   "software_version (Optional)", "plain", "xsd:string", "Contains the Cloud Server Software Version and the Software Version of the underlying Linux Kernel."
@@ -642,6 +652,16 @@ badMediaType (415)
           "value": "350 Terry Fox Dr, Kanata, Ontario, Canada",
           "op": "replace"
       }
       {
           "path": "/latitude",
           "value": "45.35189954974955",
           "op": "replace"
       }
       {
           "path": "/longitude",
           "value": "-75.91866628453701",
           "op": "replace"
       }
       {
           "path": "/contact",
           "value": "support@windriver.com",
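A minimal client-side sketch of submitting a JSON patch like the one above to the System API; the system UUID, token, and endpoint address are illustrative assumptions rather than values from this change:

::

   # Sketch: PATCH /v1/isystems/{system_uuid} with a JSON patch document.
   import json
   import urllib.request

   patch = [
       {"path": "/location", "value": "350 Terry Fox Dr, Kanata, Ontario, Canada", "op": "replace"},
       {"path": "/latitude", "value": "45.35189954974955", "op": "replace"},
       {"path": "/longitude", "value": "-75.91866628453701", "op": "replace"},
   ]
   req = urllib.request.Request(
       "http://192.168.204.2:6385/v1/isystems/<system-uuid>",  # hypothetical UUID
       data=json.dumps(patch).encode(),
       headers={"Content-Type": "application/json", "X-Auth-Token": "<token>"},
       method="PATCH",
   )
   with urllib.request.urlopen(req) as resp:
       print(json.loads(resp.read()))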
@@ -684,6 +704,8 @@ badMediaType (415)
   "updated_at": "2017-07-31T17:44:06.051441+00:00",
   "created_at": "2017-07-31T17:35:46.836024+00:00",
   "location": "350 Terry Fox Dr, Kanata, Ontario, Canada",
   "latitude": "45.35189954974955",
   "longitude": "-75.91866628453701",
   "capabilities": {
       "sdn_enabled": true,
       "shared_services": "[]",
@@ -1044,7 +1066,7 @@ This operation does not accept a request body.
Creates a partition on a specific disk of a host
**************************************************

.. rest_method:: POST /v1/ihosts/{host_id}/partitions
.. rest_method:: POST /v1/partitions

**Normal response codes**

@@ -1060,8 +1082,8 @@ badMediaType (415)
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "host_id", "URI", "csapi:UUID", "The unique identifier of an existing host."
   "ihost_uuid (Optional)", "plain", "csapi:UUID", "This parameter specifies the partition host uuid."
   "forihostid (Optional)", "plain", "xsd:string", "The ID of the host of this interface."
   "type_guid (Optional)", "plain", "csapi:UUID", "This parameter specifies the partition type guid."
   "idisk_uuid (Optional)", "plain", "csapi:UUID", "This parameter specifies the partition disk uuid."
   "size_mib (Optional)", "plain", "xsd:integer", "This parameter specifies the size of the partition."
@@ -1439,8 +1461,8 @@ badMediaType (415)
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "lvm_vg_name (Optional)", "plain", "xsd:string", "This parameter specifies the volume group name. Valid values are (is): ``nova-local``"
   "ihost_uuid (Optional)", "plain", "csapi:UUID", "This parameter specifies the compute host uuid."
   "lvm_vg_name", "plain", "xsd:string", "This parameter specifies the volume group name. Valid values are (is): ``nova-local``"
   "ihost_uuid", "plain", "csapi:UUID", "This parameter specifies the worker host uuid."

**Response parameters**

@@ -2392,9 +2414,10 @@ itemNotFound (404)
   "iinterfaces (Optional)", "plain", "xsd:list", "The list of L2 interfaces for a specific host."
   "ifname (Optional)", "plain", "xsd:string", "The user-specified name of the interface."
   "ifclass (Optional)", "plain", "xsd:string", "The class of the interface: ``platform``, ``data``, ``pci-passthrough`` or ``pci-sriov``."
   "iftype (Optional)", "plain", "xsd:string", "Indicates the type of L2 interface; ``ethernet`` or ``ae`` (aggregated ethernet or link aggregation (LAG)) or ``vlan`` (virtual lan)."
   "iftype (Optional)", "plain", "xsd:string", "Indicates the type of L2 interface; ``ethernet`` or ``ae`` (aggregated ethernet or link aggregation (LAG)) or ``vlan`` (virtual lan) or ``vf`` (sriov vf)."
   "aemode (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae``, this attribute indicates the basic mode of operation for the AE/LAG interface. Supported modes are: balanced round robin, active-backup, balanced xor, broadcast, 802.3ad, balance-tlb, balance-alb. NOTE only balanced xor and active-standby modes are supported by interfaces of ifclass=data."
   "txhashpolicy (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae`` and ``aemode : balanced``, this attribute indicates what packet headers the AE/LAG is using to distribute packets across the different links/ports of the AE/LAG group; ``layer2``, ``layer2+3`` or ``layer3+4``."
   "primary_reselect (Optional)", "plain", "xsd:string", "The reselection policy for the primary slave of the AE/LAG interface: ``always`` (default), ``better`` or ``failure``. Only applicable if ``iftype : ae`` and ``aemode : active_standby`` and ``ifclass : platform``."
   "vlan_id (Optional)", "plain", "xsd:integer", "Only applicable if ``iftype : vlan``, this attribute indicates the vlan interface id. A vlan id between 1 and 4094 (inclusive) must be selected. NOTE The vlan id must be unique for the host interface."
   "imac (Optional)", "plain", "xsd:string", "The MAC Address being used by the interface. In the case of AE/LAG, the MAC address of one of the physical ports of the AE/LAG group is used."
   "imtu (Optional)", "plain", "xsd:integer", "The Maximum Transmission Unit (MTU) of the interface, in bytes."
@@ -2408,6 +2431,7 @@ itemNotFound (404)
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
   "uses (Optional)", "plain", "xsd:list", "Interfaces which the current interface uses."
   "used_by (Optional)", "plain", "xsd:list", "Interfaces which use the current interface."
   "max_tx_rate (Optional)", "plain", "xsd:integer", "The Maximum Transmission Rate of the interface, in Mbps. Only applicable if ``ifclass : pci-sriov`` and ``iftype : vf``"

::

@@ -2428,6 +2452,7 @@ itemNotFound (404)
       }
   ],
   "txhashpolicy": null,
   "primary_reselect": null,
   "schedpolicy": null,
   "imac": "08:00:27:80:aa:6e",
   "sriov_numvfs": 0,
@@ -2453,6 +2478,7 @@ itemNotFound (404)
       }
   ],
   "txhashpolicy": "layer2",
   "primary_reselect": null,
   "schedpolicy": null,
   "imac": null,
   "sriov_numvfs": 0,
@@ -2509,6 +2535,7 @@ itemNotFound (404)
   "iftype (Optional)", "plain", "xsd:string", "Indicates the type of L2 interface; ``ethernet`` or ``ae`` (aggregated ethernet or link aggregation (LAG)) or ``vlan`` (virtual lan)."
   "aemode (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae``, this attribute indicates the basic mode of operation for the AE/LAG interface. Supported modes are: balanced round robin, active-backup, balanced xor, broadcast, 802.3ad, balance-tlb, balance-alb. NOTE only balanced xor and active-standby modes are supported by interfaces of ifclass=data."
   "txhashpolicy (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae`` and ``aemode : balanced``, this attribute indicates what packet headers the AE/LAG is using to distribute packets across the different links/ports of the AE/LAG group; ``layer2``, ``layer2+3`` or ``layer3+4``."
   "primary_reselect (Optional)", "plain", "xsd:string", "The reselection policy for the primary slave of the AE/LAG interface: ``always`` (default), ``better`` or ``failure``. Only applicable if ``iftype : ae`` and ``aemode : active_standby`` and ``ifclass : platform``."
   "vlan_id (Optional)", "plain", "xsd:integer", "Only applicable if ``iftype : vlan``, this attribute indicates the vlan interface id. A vlan id between 1 and 4094 (inclusive) must be selected. NOTE The vlan id must be unique for the host interface."
   "imac (Optional)", "plain", "xsd:string", "The MAC Address being used by the interface. In the case of AE/LAG, the MAC address of one of the physical ports of the AE/LAG group is used."
   "imtu (Optional)", "plain", "xsd:integer", "The Maximum Transmission Unit (MTU) of the interface, in bytes."
@@ -2520,6 +2547,7 @@ itemNotFound (404)
   "ports (Optional)", "plain", "xsd:list", "URIs to the physical ports of this interface."
   "uses (Optional)", "plain", "xsd:list", "Interfaces which the current interface uses."
   "used_by (Optional)", "plain", "xsd:list", "Interfaces which use the current interface."
   "max_tx_rate (Optional)", "plain", "xsd:integer", "The Maximum Transmission Rate of the interface, in Mbps. Only applicable if ``ifclass : pci-sriov`` and ``iftype : vf``"
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
@@ -2539,6 +2567,7 @@ itemNotFound (404)
       }
   ],
   "txhashpolicy" : "layer2",
   "primary_reselect": null,
   "schedpolicy" : null,
   "uuid" : "740a5bec-b7a8-4645-93ed-aea0d4cfbf86",
   "ihost_uuid" : "ff453a51-1d3b-437f-a65e-b2d163f79f85",
@@ -2598,15 +2627,17 @@ badMediaType (415)
   "host_id", "URI", "csapi:UUID", "The unique identifier of an existing host."
   "ifname (Optional)", "plain", "xsd:string", "The name for the interface."
   "ifclass (Optional)", "plain", "xsd:string", "The class of the interface: ``platform``, ``data``, ``pci-passthrough`` or ``pci-sriov``."
   "iftype (Optional)", "plain", "xsd:string", "The type of interface; i.e. ``ae`` or ``vlan``."
   "aemode (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae``, this attribute specifies whether the AE/LAG should operate as ``balanced`` or ``active_standby`` or ``802.3ad`` across its links. The ``balanced`` and ``active_standby`` are the only modes supported by ``data`` type interface. For ``mgmt`` type interface the ``802.3ad`` option must be selected."
   "iftype (Optional)", "plain", "xsd:string", "The type of interface; i.e. ``ae`` or ``vlan`` or ``ethernet``."
   "aemode (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae``, this attribute specifies whether the AE/LAG should operate as ``balanced`` or ``active_standby`` or ``802.3ad`` across its links. The ``balanced`` and ``active_standby`` are the only modes supported by ``data`` type interface. For ``mgmt`` type interface the ``802.3ad`` and ``active_standby`` options are supported."
   "txhashpolicy (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae`` and ``aemode : balanced``, this attribute specifies what packet headers the AE/LAG should use to distribute packets across the different links/ports of the AE/LAG group; ``layer2``, ``layer2+3`` or ``layer3+4``."
   "primary_reselect (Optional)", "plain", "xsd:string", "The reselection policy for the primary slave of the AE/LAG interface: ``always`` (default), ``better`` or ``failure``. Only applicable if ``iftype : ae`` and ``aemode : active_standby`` and ``ifclass : platform``."
   "vlan_id (Optional)", "plain", "xsd:integer", "Only applicable if ``iftype : vlan``, this attribute specifies a virtual lan id for a vlan interface type."
   "ports (Optional)", "plain", "xsd:list", "This attribute specifies a comma-separated list of ports that this interface contains. If ``iftype : ethernet`` then only one port is allowed."
   "uses (Optional)", "plain", "xsd:list", "Only applicable if ``iftype : ae`` or ``iftype: vlan``, this attribute specifies a comma-separated list of interfaces that this interface uses."
   "uses (Optional)", "plain", "xsd:list", "Only applicable if ``iftype : ae`` or ``iftype: vlan`` or ``iftype: ethernet``, this attribute specifies a comma-separated list of interfaces that this interface uses."
   "used_by (Optional)", "plain", "xsd:list", "This attribute specifies a comma-separated list of interfaces that use this interface."
   "imtu (Optional)", "plain", "xsd:integer", "This attribute specifies the interface's Maximum Transmit Unit."
   "sriov_numvfs (Optional)", "plain", "xsd:integer", "The number of VFs to configure on the interface's port; only applicable if ``ifclass : pci-sriov`` where only a single port is associated with the interface."
   "max_tx_rate (Optional)", "plain", "xsd:integer", "The Maximum Transmission Rate of the interface, in Mbps. Only applicable if ``ifclass : pci-sriov`` and ``iftype : vf``"
   "ihost_uuid (Optional)", "plain", "csapi:UUID", "The UUID of the host to create the interface on."

**Response parameters**

@@ -2620,6 +2651,7 @@ badMediaType (415)
   "iftype (Optional)", "plain", "xsd:string", "Indicates the type of L2 interface; ``ethernet`` or ``ae`` (aggregated ethernet or link aggregation (LAG)) or ``vlan`` (virtual lan)."
   "aemode (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae``, this attribute indicates the basic mode of operation for the AE/LAG interface. Supported modes are: balanced round robin, active-backup, balanced xor, broadcast, 802.3ad, balance-tlb, balance-alb. NOTE only balanced xor and active-standby modes are supported by interfaces of ifclass=data."
   "txhashpolicy (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae`` and ``aemode : balanced``, this attribute indicates what packet headers the AE/LAG is using to distribute packets across the different links/ports of the AE/LAG group; ``layer2``, ``layer2+3`` or ``layer3+4``."
   "primary_reselect (Optional)", "plain", "xsd:string", "The reselection policy for the primary slave of the AE/LAG interface: ``always`` (default), ``better`` or ``failure``. Only applicable if ``iftype : ae`` and ``aemode : active_standby`` and ``ifclass : platform``."
   "vlan_id (Optional)", "plain", "xsd:integer", "Only applicable if ``iftype : vlan``, this attribute indicates the vlan interface id. A vlan id between 1 and 4094 (inclusive) must be selected. NOTE The vlan id must be unique for the host interface."
   "imac (Optional)", "plain", "xsd:string", "The MAC Address being used by the interface. In the case of AE/LAG, the MAC address of one of the physical ports of the AE/LAG group is used."
   "imtu (Optional)", "plain", "xsd:integer", "The Maximum Transmission Unit (MTU) of the interface, in bytes."
@@ -2631,6 +2663,7 @@ badMediaType (415)
   "ports (Optional)", "plain", "xsd:list", "URIs to the physical ports of this interface."
   "uses (Optional)", "plain", "xsd:list", "Interfaces which the current interface uses."
   "used_by (Optional)", "plain", "xsd:list", "Interfaces which use the current interface."
   "max_tx_rate (Optional)", "plain", "xsd:integer", "The Maximum Transmission Rate of the interface, in Mbps. Only applicable if ``ifclass : pci-sriov`` and ``iftype : vf``"
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
@@ -2641,6 +2674,7 @@ badMediaType (415)
   {
       "iftype": "ae",
       "txhashpolicy": "layer2",
       "primary_reselect": null,
       "ihost_uuid": "ff453a51-1d3b-437f-a65e-b2d163f79f85",
       "imtu": "1500",
       "ifclass": "data",
@@ -2679,6 +2713,7 @@ badMediaType (415)
   "ihost_uuid": "ff453a51-1d3b-437f-a65e-b2d163f79f85",
   "vlan_id": null,
   "txhashpolicy": "layer2",
   "primary_reselect": null,
   "created_at": "2014-09-29T10:55:20.515705+00:00",
   "schedpolicy": null,
   "imac": null,
@@ -2728,12 +2763,14 @@ badMediaType (415)
   "iftype (Optional)", "plain", "xsd:string", "The type of interface; i.e. ``ethernet`` or ``ae`` or ``vlan``."
   "aemode (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae``, this attribute specifies whether the AE/LAG should operate as ``balanced`` or ``active_standby`` across its links. These are the only modes supported by ``data`` type interface."
   "txhashpolicy (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae`` and ``aemode : balanced``, this attribute specifies what packet headers the AE/LAG should use to distribute packets across the different links/ports of the AE/LAG group; ``layer2``, ``layer2+3`` or ``layer3+4``."
   "primary_reselect (Optional)", "plain", "xsd:string", "The reselection policy for the primary slave of the AE/LAG interface: ``always`` (default), ``better`` or ``failure``. Only applicable if ``iftype : ae`` and ``aemode : active_standby`` and ``ifclass : platform``."
   "vlan_id (Optional)", "plain", "xsd:integer", "Only applicable if ``iftype : vlan``, this attribute specifies a virtual lan id for a vlan interface type."
   "ports (Optional)", "plain", "xsd:list", "This attribute specifies a comma-separated list of ports that this interface contains. If ``iftype : ethernet`` then only one port is allowed."
   "uses (Optional)", "plain", "xsd:list", "Only applicable if ``iftype : ae`` or ``iftype: vlan``, this attribute specifies a comma-separated list of interfaces that this interface uses."
   "uses (Optional)", "plain", "xsd:list", "Only applicable if ``iftype : ae`` or ``iftype: vlan`` or ``iftype: ethernet``, this attribute specifies a comma-separated list of interfaces that this interface uses."
   "used_by (Optional)", "plain", "xsd:list", "This attribute specifies a comma-separated list of interfaces that use this interface."
   "imtu (Optional)", "plain", "xsd:integer", "This attribute specifies the interface's Maximum Transmit Unit."
   "sriov_numvfs (Optional)", "plain", "xsd:integer", "The number of VFs to configure on the interface's port; only applicable if ``ifclass : pci-sriov`` where only a single port is associated with the interface."
   "max_tx_rate (Optional)", "plain", "xsd:integer", "The Maximum Transmission Rate of the interface, in Mbps. Only applicable if ``ifclass : pci-sriov`` and ``iftype : vf``"

**Response parameters**

@@ -2746,6 +2783,7 @@ badMediaType (415)
   "iftype (Optional)", "plain", "xsd:string", "Indicates the type of L2 interface; ``ethernet`` or ``ae`` (aggregated ethernet or link aggregation (LAG)) or ``vlan`` (virtual lan)."
   "aemode (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae``, this attribute indicates the basic mode of operation for the AE/LAG interface. Supported modes are: balanced round robin, active-backup, balanced xor, broadcast, 802.3ad, balance-tlb, balance-alb. NOTE only balanced xor and active-standby modes are supported by interfaces of ifclass=data."
   "txhashpolicy (Optional)", "plain", "xsd:string", "Only applicable if ``iftype : ae`` and ``aemode : balanced``, this attribute indicates what packet headers the AE/LAG is using to distribute packets across the different links/ports of the AE/LAG group; ``layer2``, ``layer2+3`` or ``layer3+4``."
   "primary_reselect (Optional)", "plain", "xsd:string", "The reselection policy for the primary slave of the AE/LAG interface: ``always`` (default), ``better`` or ``failure``. Only applicable if ``iftype : ae`` and ``aemode : active_standby`` and ``ifclass : platform``."
   "vlan_id (Optional)", "plain", "xsd:integer", "Only applicable if ``iftype : vlan``, this attribute indicates the vlan interface id. A vlan id between 1 and 4094 (inclusive) must be selected. NOTE The vlan id must be unique for the host interface."
   "imac (Optional)", "plain", "xsd:string", "The MAC Address being used by the interface. In the case of AE/LAG, the MAC address of one of the physical ports of the AE/LAG group is used."
   "imtu (Optional)", "plain", "xsd:integer", "The Maximum Transmission Unit (MTU) of the interface, in bytes."
@@ -2757,6 +2795,7 @@ badMediaType (415)
   "ports (Optional)", "plain", "xsd:list", "URIs to the physical ports of this interface."
   "uses (Optional)", "plain", "xsd:list", "Interfaces which the current interface uses."
   "used_by (Optional)", "plain", "xsd:list", "Interfaces which use the current interface."
   "max_tx_rate (Optional)", "plain", "xsd:integer", "The Maximum Transmission Rate of the interface, in Mbps. Only applicable if ``ifclass : pci-sriov`` and ``iftype : vf``"
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
@@ -2780,6 +2819,11 @@ badMediaType (415)
           "value": "active_standby",
           "op": "replace"
       },
       {
           "path": "/primary_reselect",
           "value": "better",
           "op": "replace"
       },
       {
           "path": "/uses",
           "value": ['eth2','eth3'],
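One caveat worth noting: ``['eth2','eth3']`` above is Python-style quoting, while a JSON patch document on the wire requires double quotes. A small sketch of the strict-JSON form of this patch:

::

   # Sketch only: strict-JSON form of the interface patch shown above.
   import json

   patch = [
       {"path": "/aemode", "value": "active_standby", "op": "replace"},
       {"path": "/primary_reselect", "value": "better", "op": "replace"},
       {"path": "/uses", "value": ["eth2", "eth3"], "op": "replace"},
   ]
   # json.dumps emits the double-quoted form that JSON parsers require.
   print(json.dumps(patch, indent=4))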
@@ -2816,6 +2860,7 @@ badMediaType (415)
   "ihost_uuid": "ff453a51-1d3b-437f-a65e-b2d163f79f85",
   "vlan_id": null,
   "txhashpolicy": "layer2",
   "primary_reselect": null,
   "created_at": "2014-09-29T10:55:20.515705+00:00",
   "schedpolicy": null,
   "imac": null,
@@ -4801,548 +4846,20 @@ badMediaType (415)
SNMP Communities
-----------------

****************************
Lists all SNMP Communities
****************************
StarlingX supports SNMPv2c and SNMPv3, but SNMP is now configured using Helm
charts.

.. rest_method:: GET /v1/icommunity

**Normal response codes**

200

**Error response codes**

computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "icommunity (Optional)", "plain", "xsd:list", "The list of SNMP Communities."
   "access (Optional)", "plain", "xsd:string", "The SNMP GET/SET access control for a specific community."
   "community (Optional)", "plain", "xsd:string", "The community string of which the SNMP client is a member."
   "view (Optional)", "plain", "xsd:string", "The SNMP MIB view to which the community has access."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   {
       "icommunity": [
           {
               "access": "ro",
               "uuid": "744cddaa-8a24-4573-aa0e-4f8b535d95b7",
               "community": "new",
               "view": ".1"
           },
           {
               "access": "ro",
               "uuid": "73706882-9d7c-4a8f-9409-185ffee0066c",
               "community": "guest",
               "view": ".1"
           }
       ]
   }

This operation does not accept a request body.

***************************************************
Shows information about a specific SNMP Community
***************************************************

.. rest_method:: GET /v1/icommunity/{community_id}

**Normal response codes**

200

**Error response codes**

computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "community_id", "URI", "xsd:string", "The unique community string of an existing SNMP Community."

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "access (Optional)", "plain", "xsd:string", "The SNMP GET/SET access control for a specific community."
   "community (Optional)", "plain", "xsd:string", "The community string of which the SNMP client is a member."
   "view (Optional)", "plain", "xsd:string", "The SNMP MIB view to which the community has access."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   {
       "uuid": "73706882-9d7c-4a8f-9409-185ffee0066c",
       "created_at": "2014-09-24T20:06:54.386982+00:00",
       "updated_at": null,
       "community": "guest",
       "access": "ro",
       "view": ".1"
   }

This operation does not accept a request body.

**************************
Creates a SNMP Community
**************************

.. rest_method:: POST /v1/icommunity

**Normal response codes**

200

**Error response codes**

badMediaType (415)

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "community (Optional)", "plain", "xsd:string", "This parameter specifies the community string to create."

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "access (Optional)", "plain", "xsd:string", "The SNMP GET/SET access control for a specific community."
   "community (Optional)", "plain", "xsd:string", "The community string of which the SNMP client is a member."
   "view (Optional)", "plain", "xsd:string", "The SNMP MIB view to which the community has access."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   {
       "community": "guest"
   }

::

   {
       "uuid": "73706882-9d7c-4a8f-9409-185ffee0066c",
       "created_at": "2014-09-24T20:06:54.386982+00:00",
       "updated_at": null,
       "community": "guest",
       "access": "ro",
       "view": ".1"
   }
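A minimal sketch of the create call above, with an illustrative endpoint address and token; note that this API is being removed by this change, since SNMP is now configured through Helm charts:

::

   # Sketch: create an SNMP community via the legacy icommunity API.
   import json
   import urllib.request

   req = urllib.request.Request(
       "http://192.168.204.2:6385/v1/icommunity",
       data=json.dumps({"community": "guest"}).encode(),
       headers={"Content-Type": "application/json", "X-Auth-Token": "<token>"},
       method="POST",
   )
   with urllib.request.urlopen(req) as resp:
       print(json.loads(resp.read())["uuid"])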
************************************
Modifies a specific SNMP Community
************************************

.. rest_method:: PATCH /v1/icommunity/{community_id}

**Normal response codes**

200

**Error response codes**

badMediaType (415)

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "community_id", "URI", "xsd:string", "The unique community string of an existing SNMP Community."
   "community (Optional)", "plain", "xsd:string", "This parameter specifies the new community string."

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "access (Optional)", "plain", "xsd:string", "The SNMP GET/SET access control for a specific community."
   "community (Optional)", "plain", "xsd:string", "The community string of which the SNMP client is a member."
   "view (Optional)", "plain", "xsd:string", "The SNMP MIB view to which the community has access."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   [
       {
           "path": "/community",
           "value": "wrs",
           "op": "replace"
       }
   ]

::

   {
       "uuid": "744cddaa-8a24-4573-aa0e-4f8b535d95b7",
       "created_at": "2014-09-23T15:01:53.187164+00:00",
       "updated_at": "2014-09-24T19:46:40.138145+00:00",
       "community": "wrs",
       "access": "ro",
       "view": ".1"
   }

***********************************
Deletes a specific SNMP Community
***********************************

.. rest_method:: DELETE /v1/icommunity/{community_id}

**Normal response codes**

204

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "community_id", "URI", "xsd:string", "The unique community string of an existing SNMP Community."

This operation does not accept a request body.
For more information, see the `Fault Management Guide, SNMP overview <https://docs.starlingx.io/fault-mgmt/snmp-overview.html>`_ section.

-----------------------
SNMP Trap Destinations
-----------------------

**********************************
Lists all SNMP Trap Destinations
**********************************
StarlingX supports SNMPv2c and SNMPv3, but SNMP is now configured using Helm
charts.

.. rest_method:: GET /v1/itrapdest
For more information, see the `Fault Management Guide, SNMP overview <https://docs.starlingx.io/fault-mgmt/snmp-overview.html>`_ section.

**Normal response codes**

200

**Error response codes**

computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "itrapdests (Optional)", "plain", "xsd:list", "The list of SNMP Trap Destinations."
   "ip_address (Optional)", "plain", "xsd:string", "The IP address of a specific trap destination."
   "community (Optional)", "plain", "xsd:string", "The community of which the trap destination is a member."
   "type (Optional)", "plain", "xsd:string", "The SNMP version of the trap message for a specific destination."
   "port (Optional)", "plain", "xsd:integer", "The port number on which the SNMP manager is listening for traps."
   "transport (Optional)", "plain", "xsd:string", "The transport protocol used by the trap messages."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   {
       "itrapdest": [
           {
               "uuid": "fc33945c-7aba-4d83-9216-a60db7097a23", "links": [
                   {
                       "href": "http://192.168.204.2:6385/v1/itrapdest/fc33945c-7aba-4d83-9216-a60db7097a23",
                       "rel": "self"
                   },
                   {
                       "href": "http://192.168.204.2:6385/itrapdest/fc33945c-7aba-4d83-9216-a60db7097a23",
                       "rel": "bookmark"
                   }
               ],
               "ip_address": "10.10.10.1",
               "community": "cgts",
               "type": "snmpv2c_trap",
               "port": 162, "transport": "udp"
           },
           {
               "uuid": "22f0497c-0a09-41c4-8514-cb5afcbf930d", "links": [
                   {
                       "href": "http://192.168.204.2:6385/v1/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
                       "rel": "self"
                   },
                   {
                       "href": "http://192.168.204.2:6385/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
                       "rel": "bookmark"
                   }
               ],
               "ip_address": "27.134.0.8",
               "community": "sprint",
               "type": "snmpv2c_trap",
               "port": 162,
               "transport": "udp"
           }
       ]
   }

This operation does not accept a request body.

**********************************************************
Shows information about a specific SNMP Trap Destination
**********************************************************

.. rest_method:: GET /v1/itrapdest/{ip_address}

**Normal response codes**

200

**Error response codes**

computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "ip_address", "plain", "xsd:string", "The IP address of a specific trap destination."

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "ip_address (Optional)", "plain", "xsd:string", "The IP address of a specific trap destination."
   "community (Optional)", "plain", "xsd:string", "The community of which the trap destination is a member."
   "type (Optional)", "plain", "xsd:string", "The SNMP version of the trap message for a specific destination."
   "port (Optional)", "plain", "xsd:integer", "The port number on which the SNMP manager is listening for traps."
   "transport (Optional)", "plain", "xsd:string", "The transport protocol used by the trap messages."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   {
       "uuid": "22f0497c-0a09-41c4-8514-cb5afcbf930d", "links": [
           {
               "href": "http://192.168.204.2:6385/v1/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
               "rel": "self"
           },
           {
               "href": "http://192.168.204.2:6385/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
               "rel": "bookmark"
           }
       ],
       "type": "snmpv2c_trap",
       "created_at": "2014-09-24T21:09:02.842231+00:00",
       "updated_at": null,
       "community": "sprint",
       "ip_address": "27.134.0.8",
       "port": 162,
       "transport": "udp"
   }

This operation does not accept a request body.

*********************************
Creates a SNMP Trap Destination
*********************************

.. rest_method:: POST /v1/itrapdest

**Normal response codes**

200

**Error response codes**

badMediaType (415)

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "ip_address (Optional)", "plain", "xsd:string", "This parameter specifies the IP address of a new trap destination."
   "community (Optional)", "plain", "xsd:string", "This parameter specifies the community of which the trap destination is a member."

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "ip_address (Optional)", "plain", "xsd:string", "The IP address of a specific trap destination."
   "community (Optional)", "plain", "xsd:string", "The community of which the trap destination is a member."
   "type (Optional)", "plain", "xsd:string", "The SNMP version of the trap message for a specific destination."
   "port (Optional)", "plain", "xsd:integer", "The port number on which the SNMP manager is listening for traps."
   "transport (Optional)", "plain", "xsd:string", "The transport protocol used by the trap messages."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   {
       "ip_address": "27.134.0.8",
       "community": "sprint"
   }

::

   {
       "uuid": "22f0497c-0a09-41c4-8514-cb5afcbf930d", "links":
       [
           {
               "href": "http://192.168.204.2:6385/v1/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
               "rel": "self"
           },
           {
               "href": "http://192.168.204.2:6385/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
               "rel": "bookmark"
           }
       ],
       "type": "snmpv2c_trap",
       "created_at": "2014-09-24T21:09:02.842231+00:00",
       "updated_at": null,
       "community": "sprint",
       "ip_address": "27.134.0.8",
       "port": 162,
       "transport": "udp"
   }

*******************************************
Modifies a specific SNMP Trap Destination
*******************************************

.. rest_method:: PATCH /v1/itrapdest/{trapdest_id}

**Normal response codes**

200

**Error response codes**

badMediaType (415)

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "trapdest_id", "URI", "csapi:UUID", "The unique identifier of an existing SNMP Trap Destination."
   "ip_address (Optional)", "plain", "xsd:string", "This parameter specifies the IP address of a specific trap destination."
   "community (Optional)", "plain", "xsd:string", "This parameter specifies the community of which the trap destination is a member."

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "ip_address (Optional)", "plain", "xsd:string", "The IP address of a specific trap destination."
   "community (Optional)", "plain", "xsd:string", "The community of which the trap destination is a member."
   "type (Optional)", "plain", "xsd:string", "The SNMP version of the trap message for a specific destination."
   "port (Optional)", "plain", "xsd:integer", "The port number on which the SNMP manager is listening for traps."
   "transport (Optional)", "plain", "xsd:string", "The transport protocol used by the trap messages."
   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."

::

   [
       {
           "path": "/ip_address",
           "value": "47.10.1.128",
           "op": "replace"
       },
       {
           "path": "/community",
           "value": "sprint",
           "op": "replace"
       }
   ]

::

   {
       "uuid": "22f0497c-0a09-41c4-8514-cb5afcbf930d", "links": [
           {
               "href": "http://192.168.204.2:6385/v1/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
               "rel": "self"
           },
           {
               "href": "http://192.168.204.2:6385/itrapdest/22f0497c-0a09-41c4-8514-cb5afcbf930d",
               "rel": "bookmark"
           }
       ],
       "type": "snmpv2c_trap",
       "created_at": "2014-09-24T21:09:02.842231+00:00",
       "updated_at": "2014-09-24T21:13:51.061300+00:00",
       "community": "sprint",
       "ip_address": "47.10.1.128",
       "port": 162,
       "transport": "udp"
   }

******************************************
Deletes a specific SNMP Trap Destination
******************************************

.. rest_method:: DELETE /v1/itrapdest/{trapdest_id}

**Normal response codes**

204

**Request parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "trapdest_id", "URI", "csapi:UUID", "The unique identifier of an existing SNMP Trap Destination."

This operation does not accept a request body.

--------
Devices
@@ -5850,7 +5367,7 @@ This operation does not accept a request body.
Shows the attributes of a specific PCI device
***********************************************

.. rest_method:: GET /v1/devices/{device_id}
.. rest_method:: GET /v1/pci_devices/{device_id}

**Normal response codes**

@@ -5940,7 +5457,7 @@ This operation does not accept a request body.
Modifies a specific PCI device
********************************

.. rest_method:: PATCH /v1/devices/{device_id}
.. rest_method:: PATCH /v1/pci_devices/{device_id}

**Normal response codes**

@@ -9539,9 +9056,9 @@ badMediaType (415)
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "backend (Optional)", "plain", "xsd:string", "This parameter specifies the type of the backend. Valid values are (is): ``file``"
   "backend", "plain", "xsd:string", "This parameter specifies the type of the backend. Valid values are (is): ``file``"
   "name (Optional)", "plain", "xsd:string", "This parameter specifies the name of the backend."
   "services (Optional)", "plain", "xsd:string", "The name of the storage service."
   "services", "plain", "xsd:string", "The name of the storage service."
   "capabilities (Optional)", "plain", "xsd:string", "A dictionary of storage backend capabilities."
   "confirmed (Optional)", "plain", "xsd:boolean", "When ""false"" it will run in test mode without applying any modification. This allows checking a request for validity before performing non-reversible changes. When set to ""true"" the modifications are immediately applied."
@@ -11092,7 +10609,7 @@ itemNotFound (404)
Deletes a specific ceph monitor
***********************************

.. rest_method:: DELETE /v1/ceph_mon/{ceph_mon_id}
.. rest_method:: DELETE /v1/ceph_mon/{host_uuid}

Delete a ceph monitor.

@@ -11106,7 +10623,7 @@ Delete a ceph monitor.
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "ceph_mon_id", "URI", "csapi:UUID", "The unique identifier of a Ceph monitor."
   "host_uuid", "URI", "csapi:UUID", "The host UUID that the Ceph monitor belongs to."

This operation does not accept a request body.

@@ -11460,3 +10977,114 @@ Run the Docker registry garbage collector

This operation does not accept a request body.


-------------------
Kubernetes Clusters
-------------------

These APIs allow retrieval of the Kubernetes cluster information, including
the access information for remote administrative access.

****************************
List the Kubernetes clusters
****************************

.. rest_method:: GET /v1/kube_clusters

**Normal response codes**

200

**Error response codes**

computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
itemNotFound (404)

**Response parameters**

.. csv-table::
   :header: "Parameter", "Style", "Type", "Description"
   :widths: 20, 20, 20, 60

   "cluster_name", "plain", "xsd:string", "Kubernetes Cluster Name (kubernetes)"
   "cluster_version", "plain", "xsd:string", "Kubernetes Release Version"
   "cluster_api_endpoint", "plain", "xsd:string", "Cluster API Public Endpoint URL"
   "cluster_ca_cert", "plain", "xsd:string", "Admin Root CA X.509 Certificate (PEM format)"
   "admin_client_cert", "plain", "xsd:string", "Admin Client X.509 Certificate (PEM format)"
   "admin_client_key", "plain", "xsd:string", "Admin Client Key (PEM format)"
   "admin_user", "plain", "xsd:string", "Admin User Name (kubernetes-admin)"
   "admin_token (Optional)", "plain", "xsd:string", "Admin service account token for admin_user, if configured (Base64 ASCII)"

::

   {
       "kube_clusters": [
           {
               "cluster_name": "kubernetes",
               "cluster_version": "v1.18.1",
               "cluster_api_endpoint": "https://10.10.10.2:6443",
               "cluster_ca_cert": "REDACTED: <certificate-data>",
               "admin_client_cert": "REDACTED: <certificate-data>",
               "admin_client_key": "REDACTED: <key-data>",
               "admin_user": "kubernetes-admin",
               "admin_token": "REDACTED: <token-data>"
           }
       ]
   }

This operation does not accept a request body.
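A minimal client sketch of the list call above; the management endpoint address and token are illustrative assumptions:

::

   # Sketch: list Kubernetes clusters via the sysinv API.
   import json
   import urllib.request

   req = urllib.request.Request(
       "http://192.168.204.2:6385/v1/kube_clusters",
       headers={"X-Auth-Token": "<token>"},
   )
   with urllib.request.urlopen(req) as resp:
       for cluster in json.loads(resp.read())["kube_clusters"]:
           print(cluster["cluster_name"], cluster["cluster_api_endpoint"])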
********************************************************
|
||||
Get the Kubernetes cluster details of a specific cluster
|
||||
********************************************************
|
||||
|
||||
.. rest_method:: GET /v1/kube_clusters/{cluster_name}
|
||||
|
||||
**Normal response codes**
|
||||
|
||||
200
|
||||
|
||||
**Error response codes**
|
||||
|
||||
computeFault (400, 500, ...), serviceUnavailable (503), badRequest (400),
|
||||
unauthorized (401), forbidden (403), badMethod (405), overLimit (413),
|
||||
itemNotFound (404)
|
||||
|
||||
**Request parameters**
|
||||
|
||||
.. csv-table::
|
||||
:header: "Parameter", "Style", "Type", "Description"
|
||||
:widths: 20, 20, 20, 60
|
||||
|
||||
"cluster_name", "URI", "xsd:string", "The unique Kubernetes cluster name."
|
||||
|
||||
**Response parameters**
|
||||
|
||||
.. csv-table::
|
||||
:header: "Parameter", "Style", "Type", "Description"
|
||||
:widths: 20, 20, 20, 60
|
||||
|
||||
"cluster_name", "plain", "xsd:string", "Kubernetes Cluster Name (kubernetes)"
|
||||
"cluster_version", "plain", "xsd:string", "Kubernetes Release Version"
|
||||
"cluster_api_endpoint", "plain", "xsd:string", "Cluster API Public Endpoint URL"
|
||||
"cluster_ca_cert ", "plain", "xsd:string", "Admin Root CA Certificate (PEM format)"
|
||||
"admin_client_cert", "plain", "xsd:string", "Admin Client Certificate (PEM format)"
|
||||
"admin_client_key", "plain", "xsd:string", "Admin Client Key (PEM format)"
|
||||
"admin_user", "plain", "xsd:string", "Admin User Name (kubernetes-admin)"
|
||||
"admin_token (Optional)", "plain", "xsd:string", "Admin service account token for admin_user, if configured (base64 encoded)"
|
||||
|
||||
::
|
||||
|
||||
{
|
||||
"cluster_name": "kubernetes",
|
||||
"cluster_version": "v1.18.1",
|
||||
"cluster_api_endpoint": "https://10.10.10.2:6443",
|
||||
"cluster_ca_cert": "REDACTED: <certificate-data>",
|
||||
"admin_client_cert": "REDACTED: <certificate-data>",
|
||||
"admin_client_key": "REDACTED: <key-data>",
|
||||
"admin_user": "kubernetes-admin",
|
||||
"admin_token": "REDACTED: <token-data>"
|
||||
}
|
||||
|
||||
This operation does not accept a request body.
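
For illustration only, a single cluster could be queried by name in the
same way (address, port and token are again placeholders):

::

    curl -s -H "X-Auth-Token: ${TOKEN}" \
        https://<oam-floating-address>:6385/v1/kube_clusters/kubernetes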
|
||||
|
|
|
@ -6,7 +6,6 @@
|
|||
# workerconfig
|
||||
workerconfig
|
||||
workerconfig-standalone
|
||||
workerconfig-subfunction
|
||||
|
||||
# controllerconfig
|
||||
controllerconfig
|
||||
|
|
|
@ -16,6 +16,7 @@ Requires: psmisc
|
|||
Requires: sysinv
|
||||
Requires: systemd
|
||||
Requires: tsconfig
|
||||
Requires: python3-cryptography
|
||||
Requires: python3-iso8601
|
||||
Requires: python3-keyring
|
||||
Requires: python3-netaddr
|
||||
|
@ -39,8 +40,12 @@ Controller node configuration
|
|||
%define pythonroot %python3_sitearch
|
||||
%define debug_package %{nil}
|
||||
|
||||
%global __on_box_python /usr/bin/python3
|
||||
|
||||
%prep
|
||||
%setup
|
||||
# Change shebang in all relevant files in this directory and all subdirectories
|
||||
find -type f -exec sed -i '1s=^#!/usr/bin/\(python\|env python\)[23]\?=#!%{__on_box_python}=' {} +
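# For illustration (hypothetical inputs): shebang lines such as
# "#!/usr/bin/python", "#!/usr/bin/python2" or "#!/usr/bin/env python3"
# are all rewritten to "#!%{__on_box_python}", i.e. /usr/bin/python3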
|
||||
|
||||
%build
|
||||
%{__python3} setup.py build
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
[DEFAULT]
|
||||
test_path=./controllerconfig/tests
|
||||
top_dir=./controllerconfig
|
|
@ -0,0 +1,5 @@
|
|||
#
|
||||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
|
@ -0,0 +1,5 @@
|
|||
#
|
||||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
|
@ -0,0 +1,144 @@
|
|||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
"""Base test code to test migration scripts
|
||||
First, validate the migration script file names;
|
||||
second, validate the sequence in which the scripts are called.
|
||||
"""
|
||||
|
||||
from mockproc import mockprocess
|
||||
from os import listdir
|
||||
from os.path import isfile
|
||||
from os.path import join
|
||||
from tempfile import mkdtemp
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from controllerconfig.upgrades import utils
|
||||
|
||||
|
||||
# The assertion works by having each mock script write its own file name
|
||||
# into a shared output file
|
||||
# The content of that file records the sequence in which the scripts were called
|
||||
script_body = '''#! /usr/bin/env python
|
||||
with open('%s', 'a+') as f:
|
||||
f.write("%s")
|
||||
'''
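# For example, script_body % (output_filename, "71-bla1-bla2-bla3.sh")
# yields a mock script that appends "71-bla1-bla2-bla3.sh" to the shared
# output file when it is executed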
|
||||
|
||||
from_release = "20.06"
|
||||
to_release = "20.12"
|
||||
action = "migrate"
|
||||
|
||||
# Lists of scripts to be called; a ":" separator is used for
|
||||
# parsing/asserting the call sequence
|
||||
validScripts1 = ["71-bla1-bla2-bla3.sh", "8-bla1-bla2-bla3.py:",
|
||||
"21-bla1-bla2-bla3.sh:"]
|
||||
|
||||
validScripts2 = ["75-deployment-ns-upgrade.py:", "65-k8s-app-upgrade.sh:",
|
||||
"10-sysinv-adjust-partitions.py:",
|
||||
"60-helm-releases-data-migration.py:",
|
||||
"55-armada-helm-upgrade.py:",
|
||||
"95-apply-mandatory-psp-policies.py:",
|
||||
"10-sysinv-adjust-partitions.py:",
|
||||
"85-update-sc-admin-endpoint-cert.py:",
|
||||
"70-active-secured-etcd-after-upgrade.sh:",
|
||||
"50-dcmanager-subcloud-status-migration.py:",
|
||||
"45-sysinv-remove-identity-shared-service.py:",
|
||||
"25-coredns-configmap.sh:",
|
||||
"20-exempt-admin-from-lockout.sh:",
|
||||
"115-foo-bar-test-ok.sh:", "299-foo-bar-test-ok.sh:",
|
||||
"2123-foo-bar-test-ok.sh"]
|
||||
|
||||
invalidScripts1 = ["70-bla1-bla2-bla3.sh", "7-bla1-bla2-bla3.py:",
|
||||
"20-bla1-bla2-bla3.sh:", "-20-bla1-bla2-bla3.sh"]
|
||||
|
||||
invalidScripts2 = ["95-apply-mandatory-psp-policies.py",
|
||||
"10-sysinv-adjust-partitions.py:",
|
||||
"85-update-sc-admin-endpoint-cert.py:",
|
||||
"70_active-secured-etcd-after-upgrade.sh:"]
|
||||
|
||||
|
||||
# Append scripts to be executed according to the passed list
|
||||
def addScripts(self, scripts, output_filename):
|
||||
for script in scripts:
|
||||
self.scripts.append(script, returncode=0, script=script_body %
|
||||
(output_filename, script))
|
||||
|
||||
|
||||
# Test with the files under "controllerconfig/upgrade-scripts"
|
||||
def addRealMigrationScripts(self, output_filename):
|
||||
path = os.getcwd() + "/upgrade-scripts"
|
||||
for f in listdir(path):
|
||||
if isfile(join(path, f)):
|
||||
self.scripts.append(f, returncode=0, script=script_body %
|
||||
(output_filename, f))
|
||||
|
||||
|
||||
def assertProperSorted(scripts):
|
||||
output = False
|
||||
sequence = []
|
||||
for script in scripts:
|
||||
sequence.append(int(script.split("-")[0]))
|
||||
if sorted(sequence) == sequence:
|
||||
output = True
|
||||
return output
|
||||
|
||||
|
||||
class TestMigrationScripts(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.scripts_dir = mkdtemp()
|
||||
self.output_filename = mkdtemp() + "/output.txt"
|
||||
# Re-create the file for each run
|
||||
open(self.output_filename, 'w+').close()
|
||||
self.scripts = mockprocess.MockProc(self.scripts_dir)
|
||||
|
||||
def test_migration_scripts_success_1(self):
|
||||
addScripts(self, validScripts1, self.output_filename)
|
||||
with self.scripts:
|
||||
utils.execute_migration_scripts(from_release, to_release, action,
|
||||
self.scripts_dir)
|
||||
with open(self.output_filename, 'r') as f:
|
||||
output = str(f.read())
|
||||
self.assertTrue(assertProperSorted(output.split(':')))
|
||||
|
||||
def test_migration_scripts_success_2(self):
|
||||
addScripts(self, validScripts2, self.output_filename)
|
||||
with self.scripts:
|
||||
utils.execute_migration_scripts(from_release, to_release, action,
|
||||
self.scripts_dir)
|
||||
with open(self.output_filename, 'r') as f:
|
||||
output = str(f.read())
|
||||
self.assertTrue(assertProperSorted(output.split(':')))
|
||||
|
||||
def test_real_migration_scripts(self):
|
||||
addRealMigrationScripts(self, self.output_filename)
|
||||
with self.scripts:
|
||||
utils.execute_migration_scripts(from_release, to_release, action,
|
||||
self.scripts_dir)
|
||||
with open(self.output_filename, 'r') as f:
|
||||
output = str(f.read())
|
||||
self.assertTrue(assertProperSorted(output.split(':')))
|
||||
|
||||
def test_migration_scripts_validation_fail_1(self):
|
||||
addScripts(self, invalidScripts1, self.output_filename)
|
||||
with self.assertRaises(ValueError):
|
||||
with self.scripts:
|
||||
utils.execute_migration_scripts(from_release, to_release,
|
||||
action, self.scripts_dir)
|
||||
|
||||
def test_migration_scripts_validation_fail_2(self):
|
||||
addScripts(self, invalidScripts2, self.output_filename)
|
||||
with self.assertRaises(ValueError):
|
||||
with self.scripts:
|
||||
utils.execute_migration_scripts(from_release, to_release,
|
||||
action, self.scripts_dir)
|
||||
|
||||
def tearDown(self):
|
||||
os.remove(self.output_filename)
|
|
@ -24,6 +24,7 @@ import time
|
|||
import yaml
|
||||
|
||||
from sysinv.common import constants as sysinv_constants
|
||||
from sysinv.puppet import common as puppet_common
|
||||
|
||||
|
||||
# WARNING: The controller-1 upgrade is done before any puppet manifests
|
||||
|
@ -715,6 +716,25 @@ def migrate_hiera_data(from_release, to_release, role=None):
|
|||
from_hiera_path = os.path.join(PLATFORM_PATH, "puppet", from_release,
|
||||
"hieradata")
|
||||
to_hiera_path = constants.HIERADATA_PERMDIR
|
||||
|
||||
# For simplex upgrade, we already set etcd security config during
|
||||
# apply-bootstrap-manifest. We need to read it and carry it over to
|
||||
# the target static.yaml.
|
||||
static_file = os.path.join(to_hiera_path, "static.yaml")
|
||||
etcd_security_config = {}
|
||||
|
||||
if os.path.exists(static_file):
|
||||
with open(static_file, 'r') as yaml_file:
|
||||
static_config = yaml.load(yaml_file)
|
||||
|
||||
if 'platform::etcd::params::security_enabled' in static_config.keys():
|
||||
etcd_security_config['platform::etcd::params::security_enabled'] = \
|
||||
static_config['platform::etcd::params::security_enabled']
|
||||
etcd_security_config['platform::etcd::params::bind_address'] = \
|
||||
static_config['platform::etcd::params::bind_address']
|
||||
etcd_security_config['platform::etcd::params::bind_address_version'] = \
|
||||
static_config['platform::etcd::params::bind_address_version']
|
||||
|
||||
shutil.rmtree(to_hiera_path, ignore_errors=True)
|
||||
os.makedirs(to_hiera_path)
|
||||
|
||||
|
@ -757,11 +777,100 @@ def migrate_hiera_data(from_release, to_release, role=None):
|
|||
'openstack::keystone::bootstrap::dc_services_project_id':
|
||||
service_project_id
|
||||
})
|
||||
# Just for upgrade from STX4.0 to STX5.0
|
||||
if (from_release == SW_VERSION_20_06 and etcd_security_config):
|
||||
static_config.update(etcd_security_config)
|
||||
|
||||
if from_release == SW_VERSION_20_06:
|
||||
# The helm db is new in the release stx5.0 and requires
|
||||
# a password to be generated and a new user to access the DB.
|
||||
# This is required for all types of system upgrade. Should
|
||||
# be removed in the release that follows stx5.0
|
||||
static_config.update({
|
||||
'platform::helm::v2::db::postgresql::user': 'admin-helmv2'
|
||||
})
|
||||
|
||||
helmv2_db_pw = utils.get_password_from_keyring('helmv2', 'database')
|
||||
if not helmv2_db_pw:
|
||||
helmv2_db_pw = utils.set_password_in_keyring('helmv2', 'database')
|
||||
|
||||
secure_static_file = os.path.join(
|
||||
constants.HIERADATA_PERMDIR, "secure_static.yaml")
|
||||
with open(secure_static_file, 'r') as yaml_file:
|
||||
secure_static_config = yaml.load(yaml_file)
|
||||
secure_static_config.update({
|
||||
'platform::helm::v2::db::postgresql::password': helmv2_db_pw
|
||||
})
|
||||
|
||||
# Update the following static secure config values:
|
||||
# sysinv::certmon::local_keystone_password
|
||||
# sysinv::certmon::dc_keystone_password
|
||||
sysinv_pass = utils.get_password_from_keyring('sysinv', 'services')
|
||||
secure_static_config.update({
|
||||
'sysinv::certmon::local_keystone_password': sysinv_pass
|
||||
})
|
||||
|
||||
dc_pass = ''
|
||||
if role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
|
||||
dc_pass = utils.get_password_from_keyring('dcmanager', 'services')
|
||||
|
||||
secure_static_config.update({
|
||||
'sysinv::certmon::dc_keystone_password': dc_pass
|
||||
})
|
||||
|
||||
with open(secure_static_file, 'w') as yaml_file:
|
||||
yaml.dump(secure_static_config, yaml_file,
|
||||
default_flow_style=False)
|
||||
|
||||
with open(static_file, 'w') as yaml_file:
|
||||
yaml.dump(static_config, yaml_file, default_flow_style=False)
|
||||
|
||||
|
||||
def apply_sriov_config(db_credentials, hostname):
|
||||
# If controller-1 has any FEC devices or sriov vfs configured, apply the
|
||||
# sriov runtime manifest. We can't apply it from controller-0 during the
|
||||
# host-unlock process as controller-1 is running the new release.
|
||||
database = 'sysinv'
|
||||
username = db_credentials[database]['username']
|
||||
password = db_credentials[database]['password']
|
||||
# psycopg2 can connect with the barbican string, e.g. postgresql:// ...
|
||||
connection_string = DB_BARBICAN_CONNECTION_FORMAT % (
|
||||
username, password, database)
|
||||
conn = psycopg2.connect(connection_string)
|
||||
cur = conn.cursor()
|
||||
cur.execute(
|
||||
"select id, mgmt_ip from i_host where hostname=%s;", (hostname,))
|
||||
host = cur.fetchone()
|
||||
host_id = host[0]
|
||||
mgmt_ip = host[1]
|
||||
cur.execute("select id from pci_devices "
|
||||
"where sriov_numvfs > 0 and host_id=%s",
|
||||
(host_id,))
|
||||
fec_device = cur.fetchone()
|
||||
cur.execute("select id from interfaces "
|
||||
"where forihostid=%s and iftype='ethernet' "
|
||||
"and sriov_numvfs>0;",
|
||||
(host_id,))
|
||||
interface = cur.fetchone()
|
||||
if interface or fec_device:
|
||||
# There are FEC devices/sriov vfs configured, apply the sriov manifest
|
||||
LOG.info("Applying sriov/fec manifest")
|
||||
personality = sysinv_constants.WORKER
|
||||
classes = [
|
||||
'platform::interfaces::sriov::runtime',
|
||||
'platform::devices::fpga::fec::runtime'
|
||||
]
|
||||
config = {'classes': classes}
|
||||
# create a temporary file to hold the runtime configuration values
|
||||
fd, tmpfile = tempfile.mkstemp(suffix='.yaml')
|
||||
with open(tmpfile, 'w') as f:
|
||||
yaml.dump(config, f, default_flow_style=False)
|
||||
puppet_common.puppet_apply_manifest(
|
||||
mgmt_ip, personality, manifest='runtime', runtime=tmpfile)
|
||||
os.close(fd)
|
||||
os.remove(tmpfile)
|
||||
|
||||
|
||||
def upgrade_controller(from_release, to_release):
|
||||
""" Executed on the release N+1 side upgrade controller-1. """
|
||||
|
||||
|
@ -924,6 +1033,8 @@ def upgrade_controller(from_release, to_release):
|
|||
LOG.info("Failed to update hiera configuration")
|
||||
raise
|
||||
|
||||
apply_sriov_config(db_credentials, utils.CONTROLLER_1_HOSTNAME)
|
||||
|
||||
# Remove /etc/kubernetes/admin.conf after it is used to generate
|
||||
# the hiera data
|
||||
admin_conf = os.path.join(utils.KUBERNETES_CONF_PATH,
|
||||
|
@ -934,18 +1045,6 @@ def upgrade_controller(from_release, to_release):
|
|||
except subprocess.CalledProcessError:
|
||||
LOG.exception("Failed to remove file %s" % admin_conf)
|
||||
|
||||
# Prepare for swact
|
||||
LOG.info("Prepare for swact to controller-1")
|
||||
try:
|
||||
subprocess.check_call(['/usr/bin/upgrade_swact_migration.py',
|
||||
'prepare_swact',
|
||||
from_release,
|
||||
to_release],
|
||||
stdout=devnull)
|
||||
except subprocess.CalledProcessError:
|
||||
LOG.exception("Failed upgrade_swact_migration prepare_swact")
|
||||
raise
|
||||
|
||||
print("Shutting down upgrade processes...")
|
||||
|
||||
# Stop postgres service
|
||||
|
@ -1334,9 +1433,26 @@ def upgrade_controller_simplex(backup_file):
|
|||
utils.execute_migration_scripts(
|
||||
from_release, to_release, utils.ACTION_MIGRATE)
|
||||
|
||||
hostname = 'controller-0'
|
||||
LOG.info("Generating config for %s" % hostname)
|
||||
try:
|
||||
cutils.create_system_config()
|
||||
cutils.create_host_config(hostname)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.info("Failed to update hiera configuration")
|
||||
raise
|
||||
|
||||
# Runtime manifests may modify platform.conf, so we'll back it up
|
||||
temp_platform_conf = PLATFORM_CONF_FILE + ".backup"
|
||||
shutil.copy(PLATFORM_CONF_FILE, temp_platform_conf)
|
||||
apply_sriov_config(db_credentials, hostname)
|
||||
|
||||
archive.close()
|
||||
shutil.rmtree(staging_dir, ignore_errors=True)
|
||||
|
||||
# Restore platform.conf
|
||||
shutil.move(temp_platform_conf, PLATFORM_CONF_FILE)
|
||||
# Restore sysinv.conf
|
||||
shutil.move("/etc/sysinv/sysinv-temp.conf", "/etc/sysinv/sysinv.conf")
|
||||
# Restore fm.conf
|
||||
|
|
|
@ -27,12 +27,13 @@ LOG = log.getLogger(__name__)
|
|||
def get_upgrade_databases(system_role, shared_services):
|
||||
|
||||
UPGRADE_DATABASES = ('postgres', 'template1', 'sysinv',
|
||||
'barbican', 'fm')
|
||||
'barbican', 'fm', 'helmv2')
|
||||
|
||||
UPGRADE_DATABASE_SKIP_TABLES = {'postgres': (), 'template1': (),
|
||||
'sysinv': (),
|
||||
'barbican': (),
|
||||
'fm': ('alarm',)}
|
||||
'fm': ('alarm',),
|
||||
'helmv2': ()}
|
||||
|
||||
if system_role == sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
|
||||
UPGRADE_DATABASES += ('dcmanager', 'dcorch',)
|
||||
|
@ -148,6 +149,14 @@ def prepare_upgrade(from_load, to_load, i_system, mgmt_address):
|
|||
"config"))
|
||||
raise
|
||||
|
||||
# Point N+1 etcd to N for now. We will migrate when both controllers are
|
||||
# running N+1, during the swact back to controller-0. This solution will
|
||||
# present some problems when we do upgrade etcd, so further development
|
||||
# will be required at that time.
|
||||
etcd_to_dir = os.path.join(tsc.ETCD_PATH, to_load)
|
||||
etcd_from_dir = os.path.join(tsc.ETCD_PATH, from_load)
|
||||
os.symlink(etcd_from_dir, etcd_to_dir)
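# For example, with from_load 20.06 and to_load 20.12 this creates
# /opt/etcd/20.12 as a symlink to /opt/etcd/20.06 (assuming the default
# tsc.ETCD_PATH of /opt/etcd)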
|
||||
|
||||
# Copy /etc/kubernetes/admin.conf so controller-1 can access
|
||||
# during its upgrade
|
||||
try:
|
||||
|
@ -291,6 +300,13 @@ def abort_upgrade(from_load, to_load, upgrade):
|
|||
except Exception:
|
||||
LOG.exception("Failed to unexport filesystems")
|
||||
|
||||
# Depending on where we are in the upgrade we may need to remove the
|
||||
# symlink to the etcd directory
|
||||
etcd_to_dir = os.path.join(tsc.ETCD_PATH, to_load)
|
||||
if os.path.islink(etcd_to_dir):
|
||||
LOG.info("Unlinking destination etcd directory: %s " % etcd_to_dir)
|
||||
os.unlink(etcd_to_dir)
|
||||
|
||||
# Remove upgrade directories
|
||||
upgrade_dirs = [
|
||||
os.path.join(tsc.PLATFORM_PATH, "config", to_load),
|
||||
|
@ -379,6 +395,9 @@ def complete_upgrade(from_load, to_load, upgrade):
|
|||
os.path.join(tsc.PLATFORM_PATH, ".keyring", from_load),
|
||||
os.path.join(tsc.PLATFORM_PATH, "puppet", from_load),
|
||||
os.path.join(tsc.PLATFORM_PATH, "sysinv", from_load),
|
||||
os.path.join(tsc.PLATFORM_PATH, "armada", from_load),
|
||||
os.path.join(tsc.PLATFORM_PATH, "helm", from_load),
|
||||
os.path.join(tsc.ETCD_PATH, from_load)
|
||||
]
|
||||
|
||||
for directory in upgrade_dirs:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
#
|
||||
# Copyright (c) 2016-2020 Wind River Systems, Inc.
|
||||
# Copyright (c) 2016-2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
@ -27,9 +27,7 @@ from tsconfig.tsconfig import PLATFORM_PATH
|
|||
from controllerconfig import utils as cutils
|
||||
from controllerconfig.common import constants
|
||||
from sysinv.common import constants as sysinv_constants
|
||||
# sysinv common utils is needed for adding new service account and endpoints
|
||||
# during upgrade.
|
||||
# from sysinv.common import utils as sysinv_utils
|
||||
from sysinv.common import utils as sysinv_utils
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
|
@ -52,7 +50,8 @@ ACTION_MIGRATE = "migrate"
|
|||
ACTION_ACTIVATE = "activate"
|
||||
|
||||
|
||||
def execute_migration_scripts(from_release, to_release, action):
|
||||
def execute_migration_scripts(from_release, to_release, action,
|
||||
migration_script_dir="/etc/upgrade.d"):
|
||||
""" Execute migration scripts with an action:
|
||||
start: Prepare for upgrade on release N side. Called during
|
||||
"system upgrade-start".
|
||||
|
@ -62,8 +61,6 @@ def execute_migration_scripts(from_release, to_release, action):
|
|||
|
||||
devnull = open(os.devnull, 'w')
|
||||
|
||||
migration_script_dir = "/etc/upgrade.d"
|
||||
|
||||
LOG.info("Executing migration scripts with from_release: %s, "
|
||||
"to_release: %s, action: %s" % (from_release, to_release, action))
|
||||
|
||||
|
@ -72,7 +69,16 @@ def execute_migration_scripts(from_release, to_release, action):
|
|||
files = [f for f in os.listdir(migration_script_dir)
|
||||
if os.path.isfile(os.path.join(migration_script_dir, f)) and
|
||||
os.access(os.path.join(migration_script_dir, f), os.X_OK)]
|
||||
files.sort()
|
||||
# From file name, get the number to sort the calling sequence,
|
||||
# abort when the file name format does not follow the pattern
|
||||
# "nnn-*.*", where "nnn" string shall contain only digits, corresponding
|
||||
# to a valid unsigned integer (first sequence of characters before "-")
|
||||
try:
|
||||
files.sort(key=lambda x: int(x.split("-")[0]))
|
||||
except Exception:
|
||||
LOG.exception("Migration script sequence validation failed, invalid "
|
||||
"file name format")
|
||||
raise
|
||||
|
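# For example (hypothetical names): "10-sysinv-adjust-partitions.py"
# sorts with key 10, while names such as "7a-foo.sh" or "-20-foo.sh"
# fail the int() conversion above and abort the upgrade step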
||||
# Execute each migration script
|
||||
for f in files:
|
||||
|
@ -114,6 +120,22 @@ def get_password_from_keyring(service, username):
|
|||
return password
|
||||
|
||||
|
||||
def set_password_in_keyring(service, username):
|
||||
"""Generate random password and store in keyring"""
|
||||
os.environ["XDG_DATA_HOME"] = constants.KEYRING_PERMDIR
|
||||
try:
|
||||
password = sysinv_utils.generate_random_password(length=16)
|
||||
keyring.set_password(service, username, password)
|
||||
except Exception as e:
|
||||
LOG.exception("Received exception when attempting to generate "
|
||||
"password for service %s, username %s: %s" %
|
||||
(service, username, e))
|
||||
raise
|
||||
finally:
|
||||
del os.environ["XDG_DATA_HOME"]
|
||||
return password
|
||||
|
||||
|
||||
def get_upgrade_token(from_release,
|
||||
config,
|
||||
secure_config):
|
||||
|
|
|
@ -32,6 +32,7 @@ DELAY_SEC=70
|
|||
CONTROLLER_UPGRADE_STARTED_FILE="$(basename ${CONTROLLER_UPGRADE_STARTED_FLAG})"
|
||||
IMA_POLICY=/etc/ima.policy
|
||||
PUPPET_CACHE=/etc/puppet/cache
|
||||
PUPPET_CACHE_TMP=/etc/puppet/cache.tmp
|
||||
ACTIVE_CONTROLLER_NOT_FOUND_FLAG="/var/run/.active_controller_not_found"
|
||||
|
||||
fatal_error()
|
||||
|
@ -297,6 +298,60 @@ start()
|
|||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/etcd/etcd-server.crt ]
|
||||
then
|
||||
cp $CONFIG_DIR/etcd/etcd-server.crt /etc/etcd/etcd-server.crt
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to copy $CONFIG_DIR/etcd/etcd-server.crt"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/etcd/etcd-server.key ]
|
||||
then
|
||||
cp $CONFIG_DIR/etcd/etcd-server.key /etc/etcd/etcd-server.key
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to copy $CONFIG_DIR/etcd/etcd-server.key"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/etcd/etcd-client.crt ]
|
||||
then
|
||||
cp $CONFIG_DIR/etcd/etcd-client.crt /etc/etcd/etcd-client.crt
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to copy $CONFIG_DIR/etcd/etcd-client.crt"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/etcd/etcd-client.key ]
|
||||
then
|
||||
cp $CONFIG_DIR/etcd/etcd-client.key /etc/etcd/etcd-client.key
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to copy $CONFIG_DIR/etcd/etcd-client.key"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/etcd/ca.crt ]
|
||||
then
|
||||
cp $CONFIG_DIR/etcd/ca.crt /etc/etcd/ca.crt
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to copy $CONFIG_DIR/etcd/ca.crt"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/etcd/ca.key ]
|
||||
then
|
||||
cp $CONFIG_DIR/etcd/ca.key /etc/etcd/ca.key
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to copy $CONFIG_DIR/etcd/ca.key"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/registry-cert.key ]
|
||||
then
|
||||
cp $CONFIG_DIR/registry-cert.key /etc/ssl/private/registry-cert.key
|
||||
|
@ -334,15 +389,6 @@ start()
|
|||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/admin-ep-cert.pem ]
|
||||
then
|
||||
cp $CONFIG_DIR/admin-ep-cert.pem /etc/ssl/private/
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
fatal_error "Unable to copy $CONFIG_DIR/admin-ep-cert.pem to certificates dir"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e $CONFIG_DIR/dc-adminep-root-ca.crt ]
|
||||
then
|
||||
cp $CONFIG_DIR/dc-adminep-root-ca.crt /etc/pki/ca-trust/source/anchors/
|
||||
|
@ -493,13 +539,26 @@ start()
|
|||
fatal_error "Unable to copy .conf files to /etc/postgresql"
|
||||
fi
|
||||
|
||||
# Copy the hieradata to cache directory
|
||||
rm -rf ${PUPPET_CACHE}
|
||||
cp -R ${PUPPET_PATH} ${PUPPET_CACHE}
|
||||
# rsync the hieradata to temp cache directory
|
||||
rm -rf ${PUPPET_CACHE_TMP}
|
||||
rsync -a "${PUPPET_PATH}/hieradata" "${PUPPET_CACHE_TMP}"
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
umount_platform_dir
|
||||
fatal_error "Failed to copy puppet directory ${PUPPET_PATH} into cache dir ${PUPPET_CACHE}"
|
||||
fatal_error "Failed to rsync puppet hieradata from ${PUPPET_PATH} to temp cache directory ${PUPPET_CACHE_TMP}"
|
||||
fi
|
||||
|
||||
# flush data to persistent storage and rename the temp puppet cache
|
||||
# directory to the final cache directory. This is more atomic than a straight
|
||||
# copy and minimizes the chance of incomplete or corrupted cached
|
||||
# hieradata.
|
||||
sync
|
||||
rm -rf ${PUPPET_CACHE}
|
||||
mv "${PUPPET_CACHE_TMP}" "${PUPPET_CACHE}"
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
umount_platform_dir
|
||||
fatal_error "Failed to rename puppet temp cache directory ${PUPPET_CACHE_TMP} to cache directory ${PUPPET_CACHE}"
|
||||
fi
|
||||
|
||||
# Copy the staging secured vault
|
||||
|
@ -520,7 +579,7 @@ start()
|
|||
|
||||
if [ -f ${HOST_HIERA} ]; then
|
||||
echo "$0: Running puppet manifest apply"
|
||||
puppet-manifest-apply.sh ${HIERADATA_PATH} ${IPADDR} controller
|
||||
puppet-manifest-apply.sh ${HIERADATA_PATH} ${IPADDR} ${subfunction}
|
||||
RC=$?
|
||||
if [ $RC -ne 0 ]
|
||||
then
|
||||
|
|
|
@ -5,21 +5,22 @@
|
|||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This script will perform upgrade preparation and migration operations for
|
||||
# host-swact to controller-1.
|
||||
# host-swact to controller-0.
|
||||
#
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
from oslo_log import log
|
||||
from controllerconfig.common import log
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
ETCD_PATH = "/opt/etcd"
|
||||
UPGRADE_CONTROLLER_1_FILE = "/etc/platform/.upgrade_swact_controller_1"
|
||||
UPGRADE_ETCD_FILE = os.path.join(ETCD_PATH, ".upgrade_etcd")
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -40,6 +41,11 @@ def main():
|
|||
return 1
|
||||
arg += 1
|
||||
|
||||
log.configure()
|
||||
|
||||
LOG.info("upgrade_swact_migration called with action: %s from_release: %s "
|
||||
"to_release: %s" % (action, from_release, to_release))
|
||||
|
||||
if action == "migrate_etcd":
|
||||
try:
|
||||
migrate_etcd_on_swact()
|
||||
|
@ -48,6 +54,8 @@ def main():
|
|||
return 1
|
||||
elif action == "prepare_swact":
|
||||
upgrade_prepare_swact(from_release, to_release)
|
||||
|
||||
LOG.info("upgrade_swact_migration complete")
|
||||
return 0
|
||||
|
||||
|
||||
|
@ -56,12 +64,21 @@ def upgrade_prepare_swact(from_release, to_release):
|
|||
'from_release': from_release,
|
||||
'to_release': to_release
|
||||
}
|
||||
with open(UPGRADE_CONTROLLER_1_FILE, 'w') as f:
|
||||
with open(UPGRADE_ETCD_FILE, 'w') as f:
|
||||
yaml.dump(migrate_data, f, default_flow_style=False)
|
||||
|
||||
|
||||
def migrate_etcd_on_swact():
|
||||
with open(UPGRADE_CONTROLLER_1_FILE, 'r') as f:
|
||||
if not os.path.isfile(UPGRADE_ETCD_FILE):
|
||||
LOG.info("Skipping etcd migration, no request %s" %
|
||||
UPGRADE_ETCD_FILE)
|
||||
return
|
||||
|
||||
if socket.gethostname() != 'controller-0':
|
||||
LOG.info("Skipping etcd migration, not running on controller-0")
|
||||
return
|
||||
|
||||
with open(UPGRADE_ETCD_FILE, 'r') as f:
|
||||
document = yaml.safe_load(f)
|
||||
|
||||
from_release = document.get('from_release')
|
||||
|
@ -69,23 +86,22 @@ def migrate_etcd_on_swact():
|
|||
|
||||
dest_etcd = os.path.join(ETCD_PATH, to_release)
|
||||
|
||||
if os.path.exists(dest_etcd):
|
||||
# The dest_etcd must not have already been created,
|
||||
# however this can occur on a forced host-swact
|
||||
LOG.info("skipping etcd migration %s already exists" %
|
||||
dest_etcd)
|
||||
return
|
||||
if os.path.islink(dest_etcd):
|
||||
LOG.info("Unlinking destination etcd directory: %s " % dest_etcd)
|
||||
os.unlink(dest_etcd)
|
||||
|
||||
if not os.path.isfile(UPGRADE_CONTROLLER_1_FILE):
|
||||
LOG.info("skipping etcd migration, no request %s" %
|
||||
UPGRADE_CONTROLLER_1_FILE)
|
||||
if os.path.exists(dest_etcd):
|
||||
# The directory was already copied but somehow the upgrade file exists
|
||||
LOG.info("Skipping etcd migration %s already exists" %
|
||||
dest_etcd)
|
||||
os.remove(UPGRADE_ETCD_FILE)
|
||||
return
|
||||
|
||||
source_etcd = os.path.join(ETCD_PATH, from_release)
|
||||
try:
|
||||
shutil.copytree(os.path.join(source_etcd),
|
||||
os.path.join(dest_etcd))
|
||||
os.remove(UPGRADE_CONTROLLER_1_FILE)
|
||||
os.remove(UPGRADE_ETCD_FILE)
|
||||
except subprocess.CalledProcessError:
|
||||
LOG.exception("Failed to migrate %s" % source_etcd)
|
||||
raise
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
|
||||
pylint <=1.9.3;python_version<'3.0'
|
||||
pytest
|
||||
mock
|
||||
mockproc>=0.3.1 # BSD
|
||||
coverage>=3.6
|
||||
PyYAML>=3.10.0 # MIT
|
||||
os-testr>=0.8.0 # Apache-2.0
|
||||
stestr>=1.0.0 # Apache-2.0
|
||||
testresources>=0.2.4 # Apache-2.0/BSD
|
||||
testrepository>=0.0.18 # Apache-2.0/BSD
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# and then run "tox" from this directory.
|
||||
|
||||
[tox]
|
||||
envlist = flake8, pylint, py27
|
||||
envlist = flake8, pylint, py27, py36
|
||||
# Tox does not work if the path to the workdir is too long, so move it to /tmp
|
||||
toxworkdir = /tmp/{env:USER}_cctox
|
||||
stxdir = {toxinidir}/../../..
|
||||
|
@ -28,11 +28,6 @@ commands =
|
|||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:pylint]
|
||||
basepython = python2.7
|
||||
deps = {[testenv]deps}
|
||||
commands = pylint {posargs} controllerconfig --rcfile=./pylint.rc --extension-pkg-whitelist=netifaces
|
||||
|
||||
[testenv:py27]
|
||||
basepython = python2.7
|
||||
deps = {[testenv]deps}
|
||||
|
@ -49,6 +44,11 @@ commands =
|
|||
stestr run {posargs}
|
||||
stestr slowest
|
||||
|
||||
[testenv:pylint]
|
||||
basepython = python2.7
|
||||
deps = {[testenv]deps}
|
||||
commands = pylint {posargs} controllerconfig --rcfile=./pylint.rc --extension-pkg-whitelist=netifaces
|
||||
|
||||
[testenv:flake8]
|
||||
basepython = python2.7
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
|
|
|
@ -375,7 +375,12 @@ def adjust_user_partitions():
|
|||
|
||||
increased_partition_number = \
|
||||
installed_lvm_device['partition_number'] + 2
|
||||
for device, partition in six.iteritems(partition_map):
|
||||
# partition_map is not an ordered dictionary, we
|
||||
# need to sort partition_map by its key(device node)
|
||||
# to ensure the adjustments for user created partitions
|
||||
# are correct
|
||||
for device, partition in sorted(
|
||||
six.iteritems(partition_map)):
|
||||
# Adjust the device node/path of user created
|
||||
# partitions. The start/end/size of the partitions
|
||||
# will not be changed.
|
||||
|
@ -401,7 +406,9 @@ def adjust_user_partitions():
|
|||
update_db_pvs.append(pv)
|
||||
|
||||
# Reverse the updating order. The partitions that
|
||||
# are moving backwards need to be updated first.
|
||||
# are moving backwards need to be updated first because of
|
||||
# the UniqueConstraint "u_partition_path_host_id"
|
||||
# for the partition table
|
||||
update_db_partitions = update_db_partitions[::-1]
|
||||
else:
|
||||
# The primary LVM partition for cgts-vg in new release
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
#!/bin/bash
|
||||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This script will create the directory /opt/platform/device_images
|
||||
# if it does not exist.
|
||||
#
|
||||
# This script is needed for upgrade from release 20.06.
|
||||
#
|
||||
|
||||
NAME=$(basename $0)
|
||||
|
||||
# The migration scripts are passed these parameters:
|
||||
FROM_RELEASE=$1
|
||||
TO_RELEASE=$2
|
||||
ACTION=$3
|
||||
|
||||
source /etc/platform/openrc
|
||||
|
||||
# This will log to /var/log/platform.log
|
||||
function log {
|
||||
logger -p local1.info $1
|
||||
}
|
||||
|
||||
DIR_NAME='/opt/platform/device_images'
|
||||
if [ "$FROM_RELEASE" == "20.06" ] && [ "$ACTION" == "migrate" ]; then
|
||||
if [ ! -d $DIR_NAME ]; then
|
||||
log "$NAME: Create directory $DIR_NAME."
|
||||
mkdir $DIR_NAME
|
||||
if [ $? -ne 0 ]; then
|
||||
log "$NAME: Failed to create directory $DIR_NAME"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,99 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (c) 2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This script updates the i_system table in the sysinv database
|
||||
# in preparation for upgrade.
|
||||
#
|
||||
# The 'i_system' table in sysinv DB has capabilities attribute
|
||||
# which lists 'identity' as a shared service. However, identity
|
||||
# is no longer a shared service in DC. The script takes care of
|
||||
# this by removing identity entry on upgrade.
|
||||
#
|
||||
# This script can be removed in the release that follows stx.5.0.
|
||||
#
|
||||
import json
|
||||
import psycopg2
|
||||
import sys
|
||||
from controllerconfig.common import log
|
||||
from psycopg2.extras import RealDictCursor
|
||||
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
|
||||
def main():
|
||||
action = None
|
||||
from_release = None
|
||||
to_release = None
|
||||
arg = 1
|
||||
while arg < len(sys.argv):
|
||||
if arg == 1:
|
||||
from_release = sys.argv[arg]
|
||||
elif arg == 2:
|
||||
to_release = sys.argv[arg]
|
||||
elif arg == 3:
|
||||
action = sys.argv[arg]
|
||||
else:
|
||||
print ("Invalid option %s." % sys.argv[arg])
|
||||
return 1
|
||||
arg += 1
|
||||
|
||||
log.configure()
|
||||
|
||||
LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
|
||||
% (sys.argv[0], from_release, to_release, action))
|
||||
if from_release == "20.06" and action == "migrate":
|
||||
try:
|
||||
if is_subcloud():
|
||||
LOG.info("Removing identity shared service...")
|
||||
remove_identity_shared_service()
|
||||
except Exception:
|
||||
LOG.exception("Failed to remove identity entry during upgrade.")
|
||||
return 1
|
||||
|
||||
|
||||
def is_subcloud():
|
||||
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
|
||||
with conn:
|
||||
with conn.cursor(cursor_factory=RealDictCursor) as cur:
|
||||
cur.execute("SELECT * from i_system")
|
||||
system = cur.fetchone()
|
||||
return system['distributed_cloud_role'] == 'subcloud'
|
||||
|
||||
|
||||
def remove_identity_shared_service():
|
||||
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
|
||||
with conn:
|
||||
with conn.cursor(cursor_factory=RealDictCursor) as cur:
|
||||
cur.execute("SELECT * from i_system")
|
||||
system = cur.fetchone()
|
||||
|
||||
# Fetch the capabilities attribute and convert it into a dict
|
||||
capabilities = json.loads(system['capabilities'])
|
||||
|
||||
# Fetch shared services
|
||||
# It is of type unicode initially
|
||||
# and we convert it into a list for further processing
|
||||
shared_services = str(capabilities["shared_services"])
|
||||
shared_service_list = shared_services.strip('][').split(', ')
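# For example, a stored value of "['identity', 'image']" becomes
# ["'identity'", "'image'"] here; the surrounding quotes are stripped
# below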
|
||||
|
||||
# Create a new list which removes 'identity' shared service
|
||||
# and any empty string elements from list
|
||||
new_shared_services = [item.strip("'")
|
||||
for item in shared_service_list
|
||||
if "identity" not in item and item != '']
|
||||
|
||||
if len(shared_service_list) != len(new_shared_services):
|
||||
capabilities["shared_services"] = str(new_shared_services)
|
||||
LOG.info("Old shared service list: %s, "
|
||||
"New shared service list: %s"
|
||||
% (shared_services, new_shared_services))
|
||||
cur.execute("UPDATE i_system SET capabilities='%s' where id=%s"
|
||||
% (json.dumps(capabilities), system["id"]))
|
||||
|
||||
LOG.info("Removed identity from shared service list on subcloud.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
|
@ -0,0 +1,99 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This script updates the subcloud_sync table in the dcorch database
|
||||
# in preparation for upgrade from release 20.06.
|
||||
#
|
||||
# This script can be removed in the release that follows.
|
||||
#
|
||||
|
||||
import json
|
||||
import psycopg2
|
||||
import sys
|
||||
from controllerconfig.common import log
|
||||
from psycopg2.extras import RealDictCursor
|
||||
|
||||
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
|
||||
def main():
|
||||
action = None
|
||||
from_release = None
|
||||
to_release = None
|
||||
|
||||
arg = 1
|
||||
while arg < len(sys.argv):
|
||||
if arg == 1:
|
||||
from_release = sys.argv[arg]
|
||||
elif arg == 2:
|
||||
to_release = sys.argv[arg]
|
||||
elif arg == 3:
|
||||
action = sys.argv[arg]
|
||||
else:
|
||||
print("Invalid option %s." % sys.argv[arg])
|
||||
return 1
|
||||
arg += 1
|
||||
|
||||
log.configure()
|
||||
|
||||
LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
|
||||
% (sys.argv[0], from_release, to_release, action))
|
||||
if from_release == "20.06" and action == "migrate":
|
||||
try:
|
||||
if is_system_controller():
|
||||
LOG.info("Performing dcorch subcloud sync data migration...")
|
||||
update_subcloud_sync()
|
||||
except Exception as ex:
|
||||
LOG.exception(ex)
|
||||
print(ex)
|
||||
return 1
|
||||
|
||||
|
||||
def is_system_controller():
|
||||
conn = psycopg2.connect("dbname='sysinv' user='postgres'")
|
||||
|
||||
with conn:
|
||||
with conn.cursor(cursor_factory=RealDictCursor) as cur:
|
||||
cur.execute("SELECT * from i_system")
|
||||
system = cur.fetchone()
|
||||
return system['distributed_cloud_role'] == 'systemcontroller'
|
||||
|
||||
|
||||
def update_subcloud_sync():
|
||||
conn = psycopg2.connect("dbname='dcorch' user='postgres'")
|
||||
|
||||
with conn:
|
||||
with conn.cursor(cursor_factory=RealDictCursor) as cur:
|
||||
# Check if there are any subclouds
|
||||
cur.execute("SELECT * from subcloud")
|
||||
subcloud_records = cur.fetchall()
|
||||
if not subcloud_records:
|
||||
LOG.info("dcorch subcloud_sync data migration not required")
|
||||
return
|
||||
|
||||
for record in subcloud_records:
|
||||
capabilities = json.loads(record['capabilities'])
|
||||
endpoint_types = capabilities.get('endpoint_types')
|
||||
|
||||
for ept in endpoint_types:
|
||||
# Insert a record into subcloud sync for each of the
|
||||
# endpoint types supported for each subcloud
|
||||
cur.execute("INSERT into subcloud_sync (subcloud_id, "
|
||||
"subcloud_name, endpoint_type, "
|
||||
"audit_status, created_at, "
|
||||
"deleted) values (%d, '%s', "
|
||||
"'%s', '%s', '%s', 0)"
|
||||
% (record['id'],
|
||||
record['region_name'],
|
||||
ept,
|
||||
'none',
|
||||
record['created_at']))
|
||||
|
||||
LOG.info("dcorch subcloud_sync data migration completed.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
|
@ -0,0 +1,80 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (c) 2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This script updates armada to the containerized version
|
||||
# based on Helm v3. It also cleans up the previous
|
||||
# tiller-deployment.
|
||||
#
|
||||
# This script can be removed in the release that follows stx5.0
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
from sysinv.common.kubernetes import KUBERNETES_ADMIN_CONF
|
||||
from controllerconfig.common import log
|
||||
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
|
||||
def main():
|
||||
action = None
|
||||
from_release = None
|
||||
to_release = None
|
||||
arg = 1
|
||||
while arg < len(sys.argv):
|
||||
if arg == 1:
|
||||
from_release = sys.argv[arg]
|
||||
elif arg == 2:
|
||||
to_release = sys.argv[arg]
|
||||
elif arg == 3:
|
||||
action = sys.argv[arg]
|
||||
else:
|
||||
print ("Invalid option %s." % sys.argv[arg])
|
||||
return 1
|
||||
arg += 1
|
||||
|
||||
log.configure()
|
||||
|
||||
if action == 'activate' and not is_containerized_armada_installed():
|
||||
LOG.info("%s invoked with from_release = %s to_release = %s "
|
||||
"action = %s"
|
||||
% (sys.argv[0], from_release, to_release, action))
|
||||
update_armada_helmv3()
|
||||
|
||||
|
||||
def is_containerized_armada_installed():
|
||||
"""Check if containerized armada is installed by helmv3"""
|
||||
try:
|
||||
cmd = "/usr/sbin/helm list " \
|
||||
"--namespace armada --filter armada --output json " \
|
||||
"--kubeconfig {} ".format(KUBERNETES_ADMIN_CONF)
|
||||
result = subprocess.check_output(cmd, shell=True,
|
||||
stderr=subprocess.STDOUT)
|
||||
if not json.loads(result):
|
||||
return False
|
||||
return True
|
||||
except subprocess.CalledProcessError as e:
|
||||
LOG.exception("Unable to query armada helmv3 release: %s" % e.output)
|
||||
raise
|
||||
|
||||
|
||||
def update_armada_helmv3():
|
||||
playbooks_root = '/usr/share/ansible/stx-ansible/playbooks'
|
||||
upgrade_script = 'upgrade-k8s-armada-helm.yml'
|
||||
cmd = 'ansible-playbook {}/{}'.format(playbooks_root, upgrade_script)
|
||||
sub = subprocess.Popen(cmd, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
stdout, stderr = sub.communicate()
|
||||
|
||||
if sub.returncode != 0:
|
||||
LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
|
||||
raise Exception('Cannot update armada')
|
||||
|
||||
LOG.info('armada helm v3 updated successfully')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
|
@ -0,0 +1,170 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This migration script is used for migrating all helm releases
|
||||
# from configmaps to postgresql during the activate stage of
|
||||
# a platform upgrade.
|
||||
#
|
||||
# This script can be removed in the release that follows stx5.0
|
||||
#
|
||||
|
||||
import collections
|
||||
from datetime import datetime
|
||||
import psycopg2
|
||||
import subprocess
|
||||
import sys
|
||||
import json
|
||||
import keyring
|
||||
|
||||
from controllerconfig.common import log
|
||||
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
Release = collections.namedtuple(
|
||||
'release', 'key body name version status owner created_at modified_at')
|
||||
|
||||
|
||||
def main():
|
||||
action = None
|
||||
from_release = None
|
||||
to_release = None
|
||||
arg = 1
|
||||
while arg < len(sys.argv):
|
||||
if arg == 1:
|
||||
from_release = sys.argv[arg]
|
||||
elif arg == 2:
|
||||
to_release = sys.argv[arg]
|
||||
elif arg == 3:
|
||||
action = sys.argv[arg]
|
||||
else:
|
||||
print ("Invalid option %s." % sys.argv[arg])
|
||||
return 1
|
||||
arg += 1
|
||||
|
||||
log.configure()
|
||||
|
||||
if from_release == '20.06' and action == 'activate':
|
||||
LOG.info("%s invoked with from_release = %s to_release = %s "
|
||||
"action = %s"
|
||||
% (sys.argv[0], from_release, to_release, action))
|
||||
migrate_helm_releases()
|
||||
LOG.info("Complete helm releases migration for release %s "
|
||||
"to %s with action %s."
|
||||
% (from_release, to_release, action))
|
||||
|
||||
|
||||
def execute_command(cmd):
|
||||
sub = subprocess.Popen(cmd, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
stdout, stderr = sub.communicate()
|
||||
if sub.returncode != 0:
|
||||
LOG.error("Command failed:\n %s\n%s\n%s" % (cmd, stdout, stderr))
|
||||
raise Exception("Failed to execute command: %s" % cmd)
|
||||
return stdout
|
||||
|
||||
|
||||
def get_helm_releases():
|
||||
# Get all configmaps that store helm releases
|
||||
cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf get configmaps " \
|
||||
"-n kube-system -l OWNER=TILLER --sort-by '{.metadata.name}' " \
|
||||
"--template '{{range .items}}{{.metadata.name}}{{\"\\n\"}}{{end}}'"
|
||||
|
||||
releases = execute_command(cmd)
|
||||
releases_list = [r for r in releases.split('\n') if r]
|
||||
return releases_list
|
||||
|
||||
|
||||
def delete_helm_releases():
|
||||
# Delete all configmaps that store helm releases
|
||||
cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf delete configmaps " \
|
||||
"-n kube-system -l OWNER=TILLER"
|
||||
execute_command(cmd)
|
||||
|
||||
|
||||
def get_helm_release_from_configmap(release_name):
|
||||
# Get the content of a specific helm release from configmap
|
||||
cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf get configmaps " \
|
||||
"-n kube-system {} -o json".format(release_name)
|
||||
release_data = execute_command(cmd)
|
||||
|
||||
return json.loads(release_data)
|
||||
|
||||
|
||||
def map_helm_release(release):
|
||||
# Map the format of a helm release from configmap to postgresql
|
||||
try:
|
||||
key = str(release['metadata']['name'])
|
||||
body = str(release['data']['release'])
|
||||
name = str(release['metadata']['labels']['NAME'])
|
||||
version = int(release['metadata']['labels']['VERSION'])
|
||||
status = str(release['metadata']['labels']['STATUS'])
|
||||
owner = str(release['metadata']['labels']['OWNER'])
|
||||
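# Note: strftime("%s") (seconds since the epoch) relies on a platform
# (glibc) extension and is assumed to run on Linux here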
created_at = int(datetime.strftime(datetime.strptime(
|
||||
release['metadata']['creationTimestamp'],
|
||||
"%Y-%m-%dT%H:%M:%SZ"), "%s"))
|
||||
modified_at = int(release['metadata']['labels']['MODIFIED_AT'])
|
||||
|
||||
mapped_release = Release(
|
||||
key=key, body=body, name=name, version=version, status=status,
|
||||
owner=owner, created_at=created_at, modified_at=modified_at)
|
||||
except Exception as e:
|
||||
LOG.exception("Failed to convert helm release: %s" % e)
|
||||
raise
|
||||
|
||||
return mapped_release
|
||||
|
||||
|
||||
def create_helm_release_in_db(conn, release):
|
||||
with conn:
|
||||
with conn.cursor() as cur:
|
||||
try:
|
||||
cur.execute(
|
||||
"insert into releases(key, body, name, version,"
|
||||
"status, owner, created_at, modified_at) "
|
||||
"values(%s, %s, %s, %s, %s, %s, %s, %s)",
|
||||
release)
|
||||
except psycopg2.IntegrityError:
|
||||
# release already exists
|
||||
pass
|
||||
except Exception as e:
|
||||
LOG.exception("Failed to create release in db:\n%s" % e)
|
||||
raise
|
||||
|
||||
|
||||
def migrate_helm_releases():
|
||||
releases = get_helm_releases()
|
||||
|
||||
if not releases:
|
||||
LOG.info("No helm releases need to be migrated.")
|
||||
return
|
||||
|
||||
LOG.info("Start migrating helm releases:\n%s" % releases)
|
||||
|
||||
helmv2_db_pw = keyring.get_password("helmv2", "database")
|
||||
if not helmv2_db_pw:
|
||||
raise Exception("Unable to get password to access helmv2 database.")
|
||||
|
||||
try:
|
||||
conn = psycopg2.connect(user="admin-helmv2",
|
||||
password=helmv2_db_pw,
|
||||
host="localhost",
|
||||
database="helmv2")
|
||||
except Exception as e:
|
||||
LOG.exception("Failed to connect helmv2 database: %s" % e)
|
||||
raise
|
||||
|
||||
for release in releases:
|
||||
release_data = get_helm_release_from_configmap(release)
|
||||
mapped_release = map_helm_release(release_data)
|
||||
create_helm_release_in_db(conn, mapped_release)
|
||||
LOG.info("Migrated release: %s" % release)
|
||||
|
||||
delete_helm_releases()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
|
@ -96,13 +96,19 @@ if [ "$FROM_RELEASE" == "20.06" ] && [ "$ACTION" == "activate" ]; then
|
|||
fi
|
||||
|
||||
# Get the existing application details
|
||||
EXISTING_APP_VERSION=$(system application-show $EXISTING_APP_NAME --column app_version --format value)
|
||||
EXISTING_APP_STATUS=$(system application-show $EXISTING_APP_NAME --column status --format value)
|
||||
EXISTING_APP_INFO=$(system application-show $EXISTING_APP_NAME --column app_version --column status --format yaml)
|
||||
EXISTING_APP_VERSION=$(echo ${EXISTING_APP_INFO} | sed 's/.*app_version:[[:space:]]\(\S*\).*/\1/')
|
||||
EXISTING_APP_STATUS=$(echo ${EXISTING_APP_INFO} | sed 's/.*status:[[:space:]]\(\S*\).*/\1/')
|
||||
|
||||
log "$NAME: $EXISTING_APP_NAME, version $EXISTING_APP_VERSION, is currently in the state: $EXISTING_APP_STATUS"
|
||||
|
||||
if [ "x${UPGRADE_APP_VERSION}" == "x${EXISTING_APP_VERSION}" ]; then
|
||||
log "$NAME: ${UPGRADE_APP_NAME}, version ${EXISTING_APP_VERSION}, is already present. skipping..."
|
||||
continue
|
||||
# If the app is in the uploaded or applied state, continue with the next iteration.
|
||||
# Otherwise execution proceeds and the script will exit with an unexpected state.
|
||||
if [[ "${EXISTING_APP_STATUS}" =~ ^(uploaded|applied)$ ]]; then
|
||||
log "$NAME: ${UPGRADE_APP_NAME}, version ${EXISTING_APP_VERSION}, is already present. skipping..."
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
|
||||
# All applications should be in an 'applied' or 'uploaded' state. Any other state is unexpected
|
||||
|
@ -152,9 +158,10 @@ if [ "$FROM_RELEASE" == "20.06" ] && [ "$ACTION" == "activate" ]; then
|
|||
system application-update $fqpn_app
|
||||
# Wait on the upload, should be quick
|
||||
for tries in $(seq 1 $UPDATE_RESULT_ATTEMPTS); do
|
||||
UPDATING_APP_NAME=$(system application-show $UPGRADE_APP_NAME --column name --format value)
|
||||
UPDATING_APP_VERSION=$(system application-show $UPGRADE_APP_NAME --column app_version --format value)
|
||||
UPDATING_APP_STATUS=$(system application-show $UPGRADE_APP_NAME --column status --format value)
|
||||
UPDATING_APP_INFO=$(system application-show $UPGRADE_APP_NAME --column name --column app_version --column status --format yaml)
|
||||
UPDATING_APP_NAME=$(echo ${UPDATING_APP_INFO} | sed 's/.*name:[[:space:]]\(\S*\).*/\1/')
|
||||
UPDATING_APP_VERSION=$(echo ${UPDATING_APP_INFO} | sed 's/.*app_version:[[:space:]]\(\S*\).*/\1/')
|
||||
UPDATING_APP_STATUS=$(echo ${UPDATING_APP_INFO} | sed 's/.*status:[[:space:]]\(\S*\).*/\1/')
|
||||
|
||||
if [ "${UPDATING_APP_NAME}" == "${UPGRADE_APP_NAME}" ] && \
|
||||
[ "${UPDATING_APP_VERSION}" == "${UPGRADE_APP_VERSION}" ] && \
|
|
@ -0,0 +1,89 @@
|
|||
#!/bin/bash
|
||||
|
||||
#
|
||||
# Copyright (c) 2020 Intel Corporation.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Activate secured etcd after upgrade.
|
||||
#
|
||||
# Note: this can be removed in the release after STX5.0
|
||||
|
||||
. /etc/platform/platform.conf
|
||||
|
||||
FROM_REL=$1
|
||||
TO_REL=$2
|
||||
ACTION=$3
|
||||
|
||||
function log {
|
||||
logger -p local1.info $1
|
||||
}
|
||||
|
||||
# The function below is cloned from ../scripts/controller_config
|
||||
get_ip()
|
||||
{
|
||||
HOST_NAME=$1
|
||||
|
||||
# Check /etc/hosts for the hostname
|
||||
HOST_IP=$(cat /etc/hosts | grep "${HOST_NAME}" | awk '{print $1}')
|
||||
if [ -n "${HOST_IP}" ]; then
|
||||
echo ${HOST_IP}
|
||||
return
|
||||
fi
|
||||
|
||||
# Try the DNS query
|
||||
# In certain situations dnsmasq can resolve a hostname to both an IPv4
|
||||
# and an IPv6 address. The last address is the IPv6 one, which is the
|
||||
# management address and is preferred over the IPv4 pxeboot address,
|
||||
# so take the last address only.
|
||||
HOST_IP=$(dig +short ANY ${HOST_NAME}|tail -1)
|
||||
if [[ "${HOST_IP}" =~ ^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$ ]]; then
|
||||
echo ${HOST_IP}
|
||||
return
|
||||
fi
|
||||
if [[ "${HOST_IP}" =~ ^[0-9a-z]*\:[0-9a-z\:]*$ ]]; then
|
||||
echo ${HOST_IP}
|
||||
return
|
||||
fi
|
||||
}
|
||||
|
||||
enable_secured_etcd()
|
||||
{
|
||||
STATIC_YAML="/opt/platform/puppet/${sw_version}/hieradata/static.yaml"
|
||||
SYSTEM_YAML="/opt/platform/puppet/${sw_version}/hieradata/system.yaml"
|
||||
|
||||
if [[ ! -f ${STATIC_YAML} ]] || [[ ! -f ${SYSTEM_YAML} ]]; then
|
||||
log "Could not find specific static/system yaml files in \
|
||||
/opt/platform/puppet/${sw_version}/hieradata!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ETCD_SEC_ENABLED=$(grep "platform::etcd::params::security_enabled" ${STATIC_YAML} | awk '{print $2}')
|
||||
CLUSTER_HOST_ADDRESS=$(grep "platform::network::cluster_host::params::controller_address" ${SYSTEM_YAML} | awk '{print $2}')
|
||||
CLUSTER_HOST_ADDRESS_VERSION=$(grep "platform::network::cluster_host::params::subnet_version" ${SYSTEM_YAML} | awk '{print $2}')
|
||||
HOST_ADDR=$(get_ip $(hostname))
|
||||
|
||||
if [ "$ETCD_SEC_ENABLED" != "true" ]; then
|
||||
ANSIBLE_LOG_PATH=/root/enable_secured_etcd.log \
|
||||
ansible-playbook /usr/share/ansible/stx-ansible/playbooks/enable_secured_etcd.yml \
|
||||
-e "cluster_floating_address=${CLUSTER_HOST_ADDRESS}" \
|
||||
-e "etcd_listen_address_version=${CLUSTER_HOST_ADDRESS_VERSION}" \
|
||||
-e "puppet_permdir=/opt/platform/puppet/${sw_version}" \
|
||||
-e "config_permdir=/opt/platform/config/${sw_version}" \
|
||||
-e "ipaddress=${HOST_ADDR}" \
|
||||
-e "k8s_root_ca_cert=''" \
|
||||
-e "k8s_root_ca_key=''"
|
||||
if [ $? -ne 0 ]; then
|
||||
log "Failed to run ansible playbook!"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
log "${0} invoked with from_release = ${FROM_REL} to_release = ${TO_REL} action = ${ACTION}"
|
||||
|
||||
if [ ${FROM_REL} == "20.06" -a ${ACTION} == "activate" ]; then
|
||||
enable_secured_etcd
|
||||
fi
|
||||
|
||||
exit 0
|
|
@ -0,0 +1,70 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (c) 2021 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# This script creates the "deployment" Kubernetes namespace during the
|
||||
# activate step of an upgrade from release 20.06, so that resources
|
||||
# expected in that namespace can be applied after the upgrade.
|
||||
#
|
||||
# This script can be removed in the release that follows 20.06.
|
||||
#
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
from controllerconfig.common import log
|
||||
|
||||
LOG = log.get_logger(__name__)
|
||||
|
||||
|
||||
def main():
|
||||
action = None
|
||||
from_release = None
|
||||
to_release = None
|
||||
arg = 1
|
||||
while arg < len(sys.argv):
|
||||
if arg == 1:
|
||||
from_release = sys.argv[arg]
|
||||
elif arg == 2:
|
||||
to_release = sys.argv[arg]
|
||||
elif arg == 3:
|
||||
action = sys.argv[arg]
|
||||
else:
|
||||
print ("Invalid option %s." % sys.argv[arg])
|
||||
return 1
|
||||
arg += 1
|
||||
|
||||
log.configure()
|
||||
LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
|
||||
% (sys.argv[0], from_release, to_release, action))
|
||||
|
||||
if from_release == '20.06' and action == 'activate':
|
||||
create_deployment_ns()
|
||||
|
||||
|
||||
deployment_ns_yaml = """
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: deployment
|
||||
"""
|
||||
|
||||
|
||||
def create_deployment_ns():
|
||||
cmd = "echo '%s' | " \
|
||||
"kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f -" % \
|
||||
deployment_ns_yaml
|
||||
sub = subprocess.Popen(cmd, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
stdout, stderr = sub.communicate()
|
||||
if sub.returncode != 0:
|
||||
LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
|
||||
raise Exception('Cannot create deployment namespace')
|
||||
|
||||
LOG.info('Deployment namespace updated successfully')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
|
@ -9,7 +9,6 @@
|
|||
# This script can be removed in the release that follows stx.5.0
|
||||
#
|
||||
|
||||
from shutil import copyfile
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
|
@ -83,7 +82,7 @@ def execute_command(cmd):
|
|||
stdout, stderr = sub.communicate()
|
||||
if sub.returncode != 0:
|
||||
LOG.error('Command failed:\n %s\n. %s\n%s' % (cmd, stdout, stderr))
|
||||
raise Exception('Failed to update certificate')
|
||||
raise Exception('Failed to execute command: %s' % cmd)
|
||||
return stdout
|
||||
|
||||
|
||||
|
@ -112,31 +111,6 @@ def update_sc_admin_endpoint_cert(to_release):
|
|||
else:
|
||||
raise Exception('Command failed after retries: %s' % cmd)
|
||||
|
||||
# Extract subcloud admin endpoint certificate
|
||||
cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf get secret \
|
||||
sc-adminep-certificate -n sc-cert -o=jsonpath='{.data.tls\.crt}' \
|
||||
| base64 --decode"
|
||||
cert = execute_command(cmd)
|
||||
|
||||
# Extract subcloud admin endpoint private key
|
||||
cmd = "kubectl --kubeconfig=/etc/kubernetes/admin.conf get secret \
|
||||
sc-adminep-certificate -n sc-cert -o=jsonpath='{.data.tls\.key}' \
|
||||
| base64 --decode"
|
||||
key = execute_command(cmd)
|
||||
|
||||
# Create haproxy tls certificate
|
||||
cert_file = "/etc/ssl/private/admin-ep-cert.pem"
|
||||
with open(cert_file, 'w') as f:
|
||||
f.write(key + cert)
|
||||
|
||||
# Copy admin endpoint certificates to the shared filesystem directory
|
||||
shared_file = "/opt/platform/config/%s/admin-ep-cert.pem" % to_release
|
||||
copyfile(cert_file, shared_file)
|
||||
|
||||
# Restart haproxy to take the new cert
|
||||
cmd = "sm-restart service haproxy"
|
||||
execute_command(cmd)
|
||||
|
||||
LOG.info('Subcloud admin endpoint certificate updated successfully')
|
||||
|
||||
|
||||
|
|
|
@ -34,7 +34,7 @@ def main():
|
|||
|
||||
log.configure()
|
||||
|
||||
if to_release == '20.06' and action == 'activate':
|
||||
if from_release == '20.06' and action == 'activate':
|
||||
LOG.info("%s invoked from_release = %s to_release = %s action = %s"
|
||||
% (sys.argv[0], from_release, to_release, action))
|
||||
apply_mandatory_psp_policies()
|
||||
|
|
|
@ -0,0 +1,128 @@
#!/usr/bin/python
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script removes SNMP-related data (icommunity and
# itrapdest) from the dcorch database, matching the host-based
# SNMP removal, in preparation for upgrade from release 20.06.
#

import psycopg2
import sys
from controllerconfig.common import log
from psycopg2.extras import RealDictCursor

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1
    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))

    if from_release == "20.06" and action == "migrate":
        try:
            if is_system_controller():
                LOG.info("Performing dcorch snmp data removal...")
                remove_snmp_record()
        except Exception as ex:
            LOG.exception(ex)
            print(ex)
            return 1

def is_system_controller():
    with open('/etc/platform/platform.conf', 'r') as f:
        lines = f.readlines()

    for line in lines:
        if line.strip() == 'distributed_cloud_role=systemcontroller':
            return True

    return False


def remove_snmp_in_orch_request(cur, job_id):
    # Check if the record exists in orch_request
    cur.execute("select * from orch_request where orch_job_id = '%d'" %
                job_id)
    orch_request = cur.fetchall()
    if orch_request:
        cur.execute("delete from orch_request where orch_job_id = '%d'" %
                    job_id)
        LOG.info("icommunity/itrapdest is removed in orch_request.")
    else:
        LOG.info("There is no icommunity/itrapdest in orch_request.")


def remove_snmp_in_orch_job(cur, master_id):
    # Check if the record exists in orch_job
    cur.execute("select * from orch_job where source_resource_id = '%s'" %
                master_id)
    orch_job = cur.fetchall()
    if orch_job:
        for orch_job_record in orch_job:
            remove_id = orch_job_record['id']
            remove_snmp_in_orch_request(cur, remove_id)
            cur.execute("delete from orch_job where id = %d" % (remove_id))
        LOG.info("icommunity is removed in orch_job.")
    else:
        LOG.info("There is no icommunity/itrapdest in orch_job.")


def remove_snmp_in_subcloud_resource(cur, master_id):
    # Check if the record exists in subcloud_resource
    cur.execute("select * from subcloud_resource "
                "where subcloud_resource_id = '%s'" % (master_id))
    resource_subcloud = cur.fetchall()
    if resource_subcloud:
        cur.execute("delete from subcloud_resource "
                    "where subcloud_resource_id = '%s'" % (master_id))
        LOG.info("icommunity is removed in subcloud_resource.")
    else:
        LOG.info("There is no icommunity/itrapdest in subcloud_resource.")

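# remove_snmp_record() below walks the dependency chain child-first
# (orch_request -> orch_job -> subcloud_resource -> resource) so that no
# orphaned rows are left behind. psycopg2's "with conn:" block commits the
# whole removal as one transaction on success and rolls it back on error.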
def remove_snmp_record():
    conn = psycopg2.connect("dbname='dcorch' user='postgres'")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            # Check if any icommunity or itrapdest record exists
            cur.execute("select * from resource where resource_type in "
                        "('icommunity','itrapdest')")
            resource_records = cur.fetchall()
            if not resource_records:
                LOG.info("Nothing to do - "
                         "there is no icommunity/itrapdest in resource.")
                return
            for data_resource in resource_records:
                master_id = data_resource['master_id']
                remove_snmp_in_subcloud_resource(cur, master_id)
                remove_snmp_in_orch_job(cur, master_id)
                cur.execute("delete from resource "
                            "where master_id = '%s'" % (master_id))
                LOG.info("icommunity/itrapdest is removed from resource.")
    LOG.info("snmp community and trapdest data removal completed.")


if __name__ == "__main__":
    sys.exit(main())

@ -0,0 +1,63 @@
#!/usr/bin/env python
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will clear the host config target.
# This is required in order to ensure tracking is aligned with config
# requests in the N+1 release and not skewed by potential stale
# configuration from the N release.

import psycopg2
import sys

from psycopg2.extras import RealDictCursor
from controllerconfig.common import log

LOG = log.get_logger(__name__)


def main():
    action = None
    from_release = None
    to_release = None
    arg = 1

    while arg < len(sys.argv):
        if arg == 1:
            from_release = sys.argv[arg]
        elif arg == 2:
            to_release = sys.argv[arg]  # noqa
        elif arg == 3:
            action = sys.argv[arg]
        else:
            print("Invalid option %s." % sys.argv[arg])
            return 1
        arg += 1

    log.configure()

    LOG.debug("%s invoked with from_release = %s to_release = %s action = %s"
              % (sys.argv[0], from_release, to_release, action))

    # This host table data migration will likely be required for each release
    if action == "migrate":
        try:
            reset_config_target()
        except Exception as ex:
            LOG.exception(ex)
            return 1

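# reset_config_target() below issues a single UPDATE. The equivalent psql
# one-liner, handy when verifying the migration by hand (assuming local trust
# auth for the postgres user, as the script itself does):
#   sudo -u postgres psql -d sysinv -c \
#     "update i_host set config_target=NULL where recordtype!='profile'"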
def reset_config_target():

    conn = psycopg2.connect("dbname=sysinv user=postgres")
    with conn:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute("update i_host set config_target=NULL where "
                        "recordtype!='profile'",)

    LOG.info("Reset host config_target completed")


if __name__ == "__main__":
    sys.exit(main())

@ -13,10 +13,10 @@ BuildRequires: python3-setuptools
BuildRequires: python3-pbr
BuildRequires: python3-pip
BuildRequires: python3-wheel
+Requires: bash-completion
Requires: python3-httplib2
Requires: python3-prettytable
-Requires: bash-completion
Requires: python3-dateutil
Requires: python3-keystoneclient
Requires: python3-oslo-i18n
Requires: python3-oslo-serialization

@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2017 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@ -34,9 +34,10 @@ SB_TYPE_LVM = 'lvm'
SB_TYPE_CEPH = 'ceph'
SB_TYPE_CEPH_EXTERNAL = 'ceph-external'
SB_TYPE_EXTERNAL = 'external'
+SB_TYPE_CEPH_ROOK = 'ceph-rook'

SB_SUPPORTED = [SB_TYPE_FILE, SB_TYPE_LVM, SB_TYPE_CEPH, SB_TYPE_CEPH_EXTERNAL,
-                SB_TYPE_EXTERNAL]
+                SB_TYPE_EXTERNAL, SB_TYPE_CEPH_ROOK]
# Storage backend state
SB_STATE_CONFIGURED = 'configured'
SB_STATE_CONFIGURING = 'configuring'

@ -109,3 +110,11 @@ PARTITION_STATUS_MSG = {
# Partition table types.
PARTITION_TABLE_GPT = "gpt"
PARTITION_TABLE_MSDOS = "msdos"
+
+# Network definitions
+NETWORK_TYPE_MGMT = 'mgmt'
+NETWORK_TYPE_CLUSTER_HOST = 'cluster-host'
+
+SB_SUPPORTED_NETWORKS = {
+    SB_TYPE_CEPH: [NETWORK_TYPE_MGMT, NETWORK_TYPE_CLUSTER_HOST]
+}

@ -0,0 +1,177 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import testtools

from cgtsclient.tests import utils
import cgtsclient.v1.kube_cluster

FAKE_CLUSTER = {
    "cluster_name": "kubernetes",
    "cluster_version": "v1.18.1",
    "cluster_api_endpoint": "https://10.10.10.2:6443",
    "cluster_ca_cert": (
        "-----BEGIN CERTIFICATE-----\n"
        "MIIE7TCCAtWgAwIBAgIDAODHMA0GCSqGSIb3DQEBCwUAMB8xEDAOBgNVBAoMB0V0\n"
        "Y2QgQ0ExCzAJBgNVBAMMAmNhMB4XDTIxMDIyMDE0MDcxOFoXDTMxMDIxODE0MDcx\n"
        "OFowHzEQMA4GA1UECgwHRXRjZCBDQTELMAkGA1UEAwwCY2EwggIiMA0GCSqGSIb3\n"
        "DQEBAQUAA4ICDwAwggIKAoICAQDL+NVHmb69Dl+D9M1g0eQa3uq4nThcwQ+gimbU\n"
        "GcBPBJmfDmrKvIxLde8RZ+tR+N77mT76qbHtS2KlYgIALJV0ZhujFzmytQ3r0T54\n"
        "bzrSfczfvQ5zx8kGc6KWmvi86VeuX26tEuN4Kklg1Lljrl9RJ3JJ7ck6Q92wVO1U\n"
        "kQmIWUZWclEOSBQbEj1p5CDZIRxf6ZIE57f1FoFzk9MaVVAOwZKgPiN5XSsHRmRT\n"
        "3igMP/X/seZQ7q8+Bg1pGxOwCGhxnxHGTzKXTE5VNXnLH2SYfm/RBrn3FxTE2Rp7\n"
        "hAjEnt+XZxw4Eju8oNahnIGVb0JWy1gJ6RMgtyQWs1cky7DfDQiF8RmciLuTx4Gy\n"
        "81W5RSelQDqrIQueBJrHBNF1nR7F9lu2+51ZgWeqdqLEwFzyjOFDem6vpskzMO75\n"
        "EwZMJlWi3ez/xdkYKqg38QKZRfRiIeoi8BbV4wnSXqyxBJ/DZ1NAwbumbP/GRU7j\n"
        "m6RS5wlMznwg55pXpiWLDFmJ7YFu+LU1WxYicE4qjPMYBn0OcMR4b8n/f5vGLd9O\n"
        "ZPzTLIt5B+9NqMpqoFePsS4anFFJvvhVEK4WwEFsmdii76bv7pYCBftlsEK7o1Mc\n"
        "6YFGoTpNZyDA9BFTp0CB7WArQDxQHikDLQzwpqwVZOjcJQN7Rzf0X4bHtW5NdgMJ\n"
        "NIhDCwIDAQABozIwMDANBgNVHREEBjAEggJjYTALBgNVHQ8EBAMCAoQwEgYDVR0T\n"
        "AQH/BAgwBgEB/wIBATANBgkqhkiG9w0BAQsFAAOCAgEAuKu0XyKN9ZKHT1KvP1uc\n"
        "tzChXkeqQ47ciUbUvM56XvcaoB/4/VMax/2RuNBjC7XqXM+FkwZMBHnmU+hxcZ/Z\n"
        "evbF46y/puGqgDFgRKb4iSfo9JxnU5wsMX5lPwlYEuRmbJjyFvDNZdseNtH3Ws/4\n"
        "iQUGaiHl9HfOePQlb9QDprbRu/Dp6iE6Wai5fuURIXtB9PP5OD2q333vpYolmXXa\n"
        "e9ybwYD8E1X8MLQV0Irh/dJ+5T/eqtWUrZ2YhpCuAawGU35L/1ZqDT4rXW40BcoP\n"
        "cYSSr4ryWKGynYGjrnu2EnxHkYqIsgMDS/Jq8CjrZLpZ4E4TagXoZhIOa5Y3Yq9p\n"
        "yEH4zskY30BUoP7h8Bp7hZIIJ1LyI1F04mukJdHdVH89mhIkU5RuIOJoiBPOMkQw\n"
        "GmRIG8IYQMFxplwtebQrQpE6lnnIE2EdUxxqtpqAqPxnRf6LQg/gtjlGRotKiI9D\n"
        "6ypovjCQi49X4WBjiBFnrgma9MsFL2ZOJPX6XpGZ6jqBTAtVMcdb+hsZQMm8/M2Z\n"
        "QITmxBO+A1hkXGjofbo145omm5qFcWmbvvrnviv3iShEsCoIFpFnGf8RvWwNapeN\n"
        "W4WzyAwY1pQs7Er2KEixiPG7BGaC7KUD3l1kB/IeF0rpnO8rmW/Hq23eLRqtk7mF\n"
        "8M4zFA2c4PFD35Vu9ERU20E=\n"
        "-----END CERTIFICATE-----\n"),
    "admin_client_cert": (
        "-----BEGIN CERTIFICATE-----\n"
        "MIID/DCCAeSgAwIBAgIIRdk0W8Cf6RkwDQYJKoZIhvcNAQELBQAwHzEQMA4GA1UE\n"
        "CgwHRXRjZCBDQTELMAkGA1UEAwwCY2EwHhcNMjEwMjIwMTQwNzE4WhcNMjIwMjIw\n"
        "MTQyMzU4WjA0MRcwFQYDVQQKEw5zeXN0ZW06bWFzdGVyczEZMBcGA1UEAxMQa3Vi\n"
        "ZXJuZXRlcy1hZG1pbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ5x\n"
        "iFIxCMDtmbbkmuxPidugAOhcq8KQ7W7xiFKxzzyxEzOyoK3zsyL+vaMKSrq19Tc+\n"
        "bFcdm/zLPPS4RtjmUK5VP0Z5dA6a06PHlXJ/CMlZIHIQJolGYfYDg4Ky7oYFQ/KP\n"
        "4rtVGvyV7mSdhBdKIelgZ/45zyy10leq2oAWChi9P7kNX2pbwBxgLu1yCuz0f9d1\n"
        "hyx+hm11RDpUJKsbqNzgvP9nJUiSIbfcNAv7ut5RcC/mpITBdyiCnMMs6DvpC3ao\n"
        "xKTks2XWpxgK3Ay1LYjkpaqtMuYK3dGps0Au5b/fSUlJqfzbD0I6wmZYlZK/x/E9\n"
        "+aAALAceGudvBovWxW0CAwEAAaMnMCUwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQM\n"
        "MAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4ICAQCkVUfWJ/sHdKN++Uaw+wu2\n"
        "GFtBnk50btuKcB9vf9fGJK7LHWI5xSMs0kiCXHi3tpa/LvWj0FFWZZ/2fwaV/hfM\n"
        "VJUk0pF2Xjp9IuzFcI/SJWROX/TmZUxFSUL1LMaojdbLqPmIcBRJE9Kd/f0+hmtt\n"
        "2v9o8E52F8pTSG98dGAvWBfsaktiUos2FbYAJE2UKX5dTnLBLJws55xHx5isHkb5\n"
        "I8wb+NbSlKq2Hs4oR0SAjCo+2P+Ej3YblwitPkhV7AkzljHdyKr/f+QT29qgYrW2\n"
        "qi7Ftg/9fBsiiCLjLp+DJrfJQR1YnTVuhv8PCTO46IFzT3zxVe/A3EnKj/kps2y8\n"
        "qeMeDHvxEACoSXQoE2yZVyCKqp1FEjawXeAS3QAicFdoSAjhC5FSTnRs28UE6tXB\n"
        "VqWUUG0FY2/zwswAfIktClJ492utO0HBJt76HcRfR1699Qmfx6fLFKQUDM6fxJk6\n"
        "79QI3S2s3eiCwiPtHOUAz7LC5KV6c75Yq+LABY9eN5K4EI6fuD8cEhfDj3iBb3bB\n"
        "0jJp0bFsCpD90Nrx253XiVesHiKhLlvnNUVuAylDcvwt8xVv+uuBl4kpVv4kkyT/\n"
        "ApqqvGKcUwQp9jIdY9nSZ/SZRW8QFzf404UVeiH+Ruu6+CCqh2PLAtDnSCPVRt1e\n"
        "O+hShAzOqQGF72F6XYlx/g==\n"
        "-----END CERTIFICATE-----\n"),
    "admin_client_key": (
        "-----BEGIN RSA PRIVATE KEY-----\n"
        "MIIEpAIBAAKCAQEAnnGIUjEIwO2ZtuSa7E+J26AA6FyrwpDtbvGIUrHPPLETM7Kg\n"
        "rfOzIv69owpKurX1Nz5sVx2b/Ms89LhG2OZQrlU/Rnl0DprTo8eVcn8IyVkgchAm\n"
        "iUZh9gODgrLuhgVD8o/iu1Ua/JXuZJ2EF0oh6WBn/jnPLLXSV6ragBYKGL0/uQ1f\n"
        "alvAHGAu7XIK7PR/13WHLH6GbXVEOlQkqxuo3OC8/2clSJIht9w0C/u63lFwL+ak\n"
        "hMF3KIKcwyzoO+kLdqjEpOSzZdanGArcDLUtiOSlqq0y5grd0amzQC7lv99JSUmp\n"
        "/NsPQjrCZliVkr/H8T35oAAsBx4a528Gi9bFbQIDAQABAoIBAQCJzUZ57ammWj/x\n"
        "oJvZYUgOGvgPH+JG41ONxUYCXiFWsM95jCdRg33Otu3qKl5aSz0Noh4KGnd7gqvu\n"
        "T4NWy+Fp7jyNJ763oRLnBAPHxBK5Q+oDKmbJx8wVcnLjronjSBsTkO7qbRd+jUv8\n"
        "eD7VHqWl2zI3GsJEKZLaqn9FHWYEot2s17obd//4lJPcBg6kGhHDGkJFm7xvVELa\n"
        "VXCIN1E9bAoIgv3pie+O53FH0YoXptvYG4F+ffHGk8/cbdcBJ4oLJqF2mJiwuBbf\n"
        "GYa5T/rIoPkrnc+kmGcePC6pPjPxttHvyaWIDQZj4Jcy4oz6tzFUF0oEZ2/JfMBt\n"
        "Il13gqylAoGBAMU/oaxXHM//NRQqMlL9R8LYLcnze2rnqt+T0ORUZQGETSSXaTxv\n"
        "I4T2wyy9yB583RDVJNXp4T3Js2XnweNj8pRRsCjxY1lkpSOaLVqAw/1HwK1DOSEG\n"
        "EqW8s37YOPZWGAYIhpfEbD5y960JUjVsuW71w/5cDWkoi1eyeFVbuXg7AoGBAM2i\n"
        "+0A6IrZsy/oIJoF8xmOEH78zEMv8W6Qmfb24N0hdcaFJnM48aF50wk8+/YgMNI7J\n"
        "kKR7JJAIQmIFn8tYji9zeeRny4iAclRb6ecglCspvxLzF7tci3ae1snaOFs2wz6b\n"
        "MkLSfb4nNf2u3dsJ2Z0tU8Tb7pxCDH/yEjCRA4Z3AoGAM/T58jqUFVnlMmWXEfMz\n"
        "puhoz0x6kwNpKDF4kdyFKqwd4eicSNYBpjGV4cAv6Y/8b0WlyU8tDKiHv+0XTn1y\n"
        "VY1a+L307IQtV75x+ef3OE1hPIJ7lu5RlSSqp1vvTTwKYfR2950+4ghIo2TUKcx0\n"
        "3/yO3v6CbdPHOJeDSQC7TycCgYEAq61XyaU/ecGXAaVwUEaVclvKDVxat5J2B7NC\n"
        "4vM65CVvSlIkoWF5WPJtjq9uBvj5oAPTyB4uxji/Awri/2dtPVxQ9UlaeRmTWa5q\n"
        "ttVSHj76EJ32wCthG6U8eMTArBYqJsh2y6bj567gumwVOFse3MQM3ZsnuDjEKsU0\n"
        "Pmuy370CgYAULotjgbSNBZcJu2a2urX+vtjPwpGSsiKYdVHBvspcwWcu8YaaeAde\n"
        "71781PJbFV7v45nT2thc+w9IYemXATH/cOO+JVUMYqZY0c+AOa8bvjnMY5Z6cS6Y\n"
        "WJC6NHVmvvFb1YhXjQz2GA9GGBmx9+5/vaPp4aPp+VMfdt9MkEV/NQ==\n"
        "-----END RSA PRIVATE KEY-----\n"),
    "admin_user": "kubernetes-admin",
    "admin_token": (
        "ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklpMXpXRFZyUkVreFZqQTVUVTVU"
        "UmtOSFVuQTBVSE5PVTNWdlJFaG1RM1ozT1VGMU1UbGZZemhtVFZraWZRLmV5SnBj"
        "M01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmla"
        "WEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlP"
        "aUpyZFdKbExYTjVjM1JsYlNJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpa"
        "V0ZqWTI5MWJuUXZjMlZqY21WMExtNWhiV1VpT2lKcmRXSmxjbTVsZEdWekxXRmti"
        "V2x1TFhSdmEyVnVMVFIyYzNCdElpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJh"
        "V05sWVdOamIzVnVkQzl6WlhKMmFXTmxMV0ZqWTI5MWJuUXVibUZ0WlNJNkltdDFZ"
        "bVZ5Ym1WMFpYTXRZV1J0YVc0aUxDSnJkV0psY201bGRHVnpMbWx2TDNObGNuWnBZ"
        "MlZoWTJOdmRXNTBMM05sY25acFkyVXRZV05qYjNWdWRDNTFhV1FpT2lJMFlURm1a"
        "VEpqTlMweU5qQTJMVFJoWWpRdFlqTXlNUzB5TjJWak1HRXdZVFkyTnpnaUxDSnpk"
        "V0lpT2lKemVYTjBaVzA2YzJWeWRtbGpaV0ZqWTI5MWJuUTZhM1ZpWlMxemVYTjBa"
        "VzA2YTNWaVpYSnVaWFJsY3kxaFpHMXBiaUo5LlhyRU5hNXI5SXRwOGJjM25aMVZo"
        "ZkJlUEFaQ1l2dU5oUVFLYVhNWXlLVjZmQXFiSENIQi1kVnJUYXcxbWs5YXdIQmVz"
        "MXhKUFliVHdzU2dacTZkdFlLYjZuY2RGUUpCYjM2aGJ0NnJ4WnJsZlNYRzFVS2xy"
        "MlQ4ZW1KaFVCV3hFSzVXazRLU1ZobnVBcmJDLUU3MDNTd0hVdEU2UUhDWkRGTWFk"
        "QUoyajJDNmo2RktoLXIwUWpfQ1I4TzBVUTF4c0I0YW9ZS05rUGUxeFJZSVZKUTFW"
        "TjlFdkFaa3lUUFhORDhpUV9hQVFuSlBfUFlCS09OLTAyTnZOY3llVjZ1LWNzdzI3"
        "NVAyYXJIeGdLLXZrMG5Ec1FkTkR5S3hBY2t3Skc3bkVyVmJkNVJoY2JiN2gwX2Jx"
        "dmt4QnJmaEJ5STE4c3k1WFdQTGE4cThIVVE3d092RlpXUQ==")
}

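# utils.FakeAPI maps a (method, path) pair to a (headers, body) tuple and
# records every request in self.api.calls, so the tests below can assert on
# both the calls made and the decoded resource fields.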
fixtures = {
    '/v1/kube_clusters':
    {
        'GET': (
            {},
            {"kube_clusters": [FAKE_CLUSTER]},
        ),
    },
    '/v1/kube_clusters/%s' % FAKE_CLUSTER['cluster_name']:
    {
        'GET': (
            {},
            FAKE_CLUSTER,
        ),
    },
}


class KubeClusterManagerTest(testtools.TestCase):

    def setUp(self):
        super(KubeClusterManagerTest, self).setUp()
        self.api = utils.FakeAPI(fixtures)
        self.mgr = cgtsclient.v1.kube_cluster.KubeClusterManager(self.api)

    def test_kube_cluster_list(self):
        kube_clusters = self.mgr.list()
        expect = [
            ('GET', '/v1/kube_clusters', {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(len(kube_clusters), 1)

    def test_kube_cluster_show(self):
        kube_cluster = self.mgr.get(FAKE_CLUSTER['cluster_name'])
        expect = [
            ('GET', '/v1/kube_clusters/%s' % FAKE_CLUSTER['cluster_name'], {}, None),
        ]
        self.assertEqual(self.api.calls, expect)
        self.assertEqual(kube_cluster.cluster_name,
                         FAKE_CLUSTER['cluster_name'])
        self.assertEqual(kube_cluster.cluster_version,
                         FAKE_CLUSTER['cluster_version'])
        self.assertEqual(kube_cluster.cluster_api_endpoint,
                         FAKE_CLUSTER['cluster_api_endpoint'])
        self.assertEqual(kube_cluster.cluster_ca_cert,
                         FAKE_CLUSTER['cluster_ca_cert'])
        self.assertEqual(kube_cluster.admin_client_cert,
                         FAKE_CLUSTER['admin_client_cert'])
        self.assertEqual(kube_cluster.admin_client_key,
                         FAKE_CLUSTER['admin_client_key'])
        self.assertEqual(kube_cluster.admin_user,
                         FAKE_CLUSTER['admin_user'])
        self.assertEqual(kube_cluster.admin_token,
                         FAKE_CLUSTER['admin_token'])

@ -0,0 +1,158 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import mock

from cgtsclient.tests import test_shell
from cgtsclient.v1.kube_cluster import KubeCluster


FAKE_CLUSTER = {
    "cluster_name": "kubernetes",
    "cluster_version": "v1.18.1",
    "cluster_api_endpoint": "https://10.10.10.2:6443",
    "cluster_ca_cert": (
        "-----BEGIN CERTIFICATE-----\n"
        "MIIE7TCCAtWgAwIBAgIDAODHMA0GCSqGSIb3DQEBCwUAMB8xEDAOBgNVBAoMB0V0\n"
        "Y2QgQ0ExCzAJBgNVBAMMAmNhMB4XDTIxMDIyMDE0MDcxOFoXDTMxMDIxODE0MDcx\n"
        "OFowHzEQMA4GA1UECgwHRXRjZCBDQTELMAkGA1UEAwwCY2EwggIiMA0GCSqGSIb3\n"
        "DQEBAQUAA4ICDwAwggIKAoICAQDL+NVHmb69Dl+D9M1g0eQa3uq4nThcwQ+gimbU\n"
        "GcBPBJmfDmrKvIxLde8RZ+tR+N77mT76qbHtS2KlYgIALJV0ZhujFzmytQ3r0T54\n"
        "bzrSfczfvQ5zx8kGc6KWmvi86VeuX26tEuN4Kklg1Lljrl9RJ3JJ7ck6Q92wVO1U\n"
        "kQmIWUZWclEOSBQbEj1p5CDZIRxf6ZIE57f1FoFzk9MaVVAOwZKgPiN5XSsHRmRT\n"
        "3igMP/X/seZQ7q8+Bg1pGxOwCGhxnxHGTzKXTE5VNXnLH2SYfm/RBrn3FxTE2Rp7\n"
        "hAjEnt+XZxw4Eju8oNahnIGVb0JWy1gJ6RMgtyQWs1cky7DfDQiF8RmciLuTx4Gy\n"
        "81W5RSelQDqrIQueBJrHBNF1nR7F9lu2+51ZgWeqdqLEwFzyjOFDem6vpskzMO75\n"
        "EwZMJlWi3ez/xdkYKqg38QKZRfRiIeoi8BbV4wnSXqyxBJ/DZ1NAwbumbP/GRU7j\n"
        "m6RS5wlMznwg55pXpiWLDFmJ7YFu+LU1WxYicE4qjPMYBn0OcMR4b8n/f5vGLd9O\n"
        "ZPzTLIt5B+9NqMpqoFePsS4anFFJvvhVEK4WwEFsmdii76bv7pYCBftlsEK7o1Mc\n"
        "6YFGoTpNZyDA9BFTp0CB7WArQDxQHikDLQzwpqwVZOjcJQN7Rzf0X4bHtW5NdgMJ\n"
        "NIhDCwIDAQABozIwMDANBgNVHREEBjAEggJjYTALBgNVHQ8EBAMCAoQwEgYDVR0T\n"
        "AQH/BAgwBgEB/wIBATANBgkqhkiG9w0BAQsFAAOCAgEAuKu0XyKN9ZKHT1KvP1uc\n"
        "tzChXkeqQ47ciUbUvM56XvcaoB/4/VMax/2RuNBjC7XqXM+FkwZMBHnmU+hxcZ/Z\n"
        "evbF46y/puGqgDFgRKb4iSfo9JxnU5wsMX5lPwlYEuRmbJjyFvDNZdseNtH3Ws/4\n"
        "iQUGaiHl9HfOePQlb9QDprbRu/Dp6iE6Wai5fuURIXtB9PP5OD2q333vpYolmXXa\n"
        "e9ybwYD8E1X8MLQV0Irh/dJ+5T/eqtWUrZ2YhpCuAawGU35L/1ZqDT4rXW40BcoP\n"
        "cYSSr4ryWKGynYGjrnu2EnxHkYqIsgMDS/Jq8CjrZLpZ4E4TagXoZhIOa5Y3Yq9p\n"
        "yEH4zskY30BUoP7h8Bp7hZIIJ1LyI1F04mukJdHdVH89mhIkU5RuIOJoiBPOMkQw\n"
        "GmRIG8IYQMFxplwtebQrQpE6lnnIE2EdUxxqtpqAqPxnRf6LQg/gtjlGRotKiI9D\n"
        "6ypovjCQi49X4WBjiBFnrgma9MsFL2ZOJPX6XpGZ6jqBTAtVMcdb+hsZQMm8/M2Z\n"
        "QITmxBO+A1hkXGjofbo145omm5qFcWmbvvrnviv3iShEsCoIFpFnGf8RvWwNapeN\n"
        "W4WzyAwY1pQs7Er2KEixiPG7BGaC7KUD3l1kB/IeF0rpnO8rmW/Hq23eLRqtk7mF\n"
        "8M4zFA2c4PFD35Vu9ERU20E=\n"
        "-----END CERTIFICATE-----\n"),
    "admin_client_cert": (
        "-----BEGIN CERTIFICATE-----\n"
        "MIID/DCCAeSgAwIBAgIIRdk0W8Cf6RkwDQYJKoZIhvcNAQELBQAwHzEQMA4GA1UE\n"
        "CgwHRXRjZCBDQTELMAkGA1UEAwwCY2EwHhcNMjEwMjIwMTQwNzE4WhcNMjIwMjIw\n"
        "MTQyMzU4WjA0MRcwFQYDVQQKEw5zeXN0ZW06bWFzdGVyczEZMBcGA1UEAxMQa3Vi\n"
        "ZXJuZXRlcy1hZG1pbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ5x\n"
        "iFIxCMDtmbbkmuxPidugAOhcq8KQ7W7xiFKxzzyxEzOyoK3zsyL+vaMKSrq19Tc+\n"
        "bFcdm/zLPPS4RtjmUK5VP0Z5dA6a06PHlXJ/CMlZIHIQJolGYfYDg4Ky7oYFQ/KP\n"
        "4rtVGvyV7mSdhBdKIelgZ/45zyy10leq2oAWChi9P7kNX2pbwBxgLu1yCuz0f9d1\n"
        "hyx+hm11RDpUJKsbqNzgvP9nJUiSIbfcNAv7ut5RcC/mpITBdyiCnMMs6DvpC3ao\n"
        "xKTks2XWpxgK3Ay1LYjkpaqtMuYK3dGps0Au5b/fSUlJqfzbD0I6wmZYlZK/x/E9\n"
        "+aAALAceGudvBovWxW0CAwEAAaMnMCUwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQM\n"
        "MAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4ICAQCkVUfWJ/sHdKN++Uaw+wu2\n"
        "GFtBnk50btuKcB9vf9fGJK7LHWI5xSMs0kiCXHi3tpa/LvWj0FFWZZ/2fwaV/hfM\n"
        "VJUk0pF2Xjp9IuzFcI/SJWROX/TmZUxFSUL1LMaojdbLqPmIcBRJE9Kd/f0+hmtt\n"
        "2v9o8E52F8pTSG98dGAvWBfsaktiUos2FbYAJE2UKX5dTnLBLJws55xHx5isHkb5\n"
        "I8wb+NbSlKq2Hs4oR0SAjCo+2P+Ej3YblwitPkhV7AkzljHdyKr/f+QT29qgYrW2\n"
        "qi7Ftg/9fBsiiCLjLp+DJrfJQR1YnTVuhv8PCTO46IFzT3zxVe/A3EnKj/kps2y8\n"
        "qeMeDHvxEACoSXQoE2yZVyCKqp1FEjawXeAS3QAicFdoSAjhC5FSTnRs28UE6tXB\n"
        "VqWUUG0FY2/zwswAfIktClJ492utO0HBJt76HcRfR1699Qmfx6fLFKQUDM6fxJk6\n"
        "79QI3S2s3eiCwiPtHOUAz7LC5KV6c75Yq+LABY9eN5K4EI6fuD8cEhfDj3iBb3bB\n"
        "0jJp0bFsCpD90Nrx253XiVesHiKhLlvnNUVuAylDcvwt8xVv+uuBl4kpVv4kkyT/\n"
        "ApqqvGKcUwQp9jIdY9nSZ/SZRW8QFzf404UVeiH+Ruu6+CCqh2PLAtDnSCPVRt1e\n"
        "O+hShAzOqQGF72F6XYlx/g==\n"
        "-----END CERTIFICATE-----\n"),
    "admin_client_key": (
        "-----BEGIN RSA PRIVATE KEY-----\n"
        "MIIEpAIBAAKCAQEAnnGIUjEIwO2ZtuSa7E+J26AA6FyrwpDtbvGIUrHPPLETM7Kg\n"
        "rfOzIv69owpKurX1Nz5sVx2b/Ms89LhG2OZQrlU/Rnl0DprTo8eVcn8IyVkgchAm\n"
        "iUZh9gODgrLuhgVD8o/iu1Ua/JXuZJ2EF0oh6WBn/jnPLLXSV6ragBYKGL0/uQ1f\n"
        "alvAHGAu7XIK7PR/13WHLH6GbXVEOlQkqxuo3OC8/2clSJIht9w0C/u63lFwL+ak\n"
        "hMF3KIKcwyzoO+kLdqjEpOSzZdanGArcDLUtiOSlqq0y5grd0amzQC7lv99JSUmp\n"
        "/NsPQjrCZliVkr/H8T35oAAsBx4a528Gi9bFbQIDAQABAoIBAQCJzUZ57ammWj/x\n"
        "oJvZYUgOGvgPH+JG41ONxUYCXiFWsM95jCdRg33Otu3qKl5aSz0Noh4KGnd7gqvu\n"
        "T4NWy+Fp7jyNJ763oRLnBAPHxBK5Q+oDKmbJx8wVcnLjronjSBsTkO7qbRd+jUv8\n"
        "eD7VHqWl2zI3GsJEKZLaqn9FHWYEot2s17obd//4lJPcBg6kGhHDGkJFm7xvVELa\n"
        "VXCIN1E9bAoIgv3pie+O53FH0YoXptvYG4F+ffHGk8/cbdcBJ4oLJqF2mJiwuBbf\n"
        "GYa5T/rIoPkrnc+kmGcePC6pPjPxttHvyaWIDQZj4Jcy4oz6tzFUF0oEZ2/JfMBt\n"
        "Il13gqylAoGBAMU/oaxXHM//NRQqMlL9R8LYLcnze2rnqt+T0ORUZQGETSSXaTxv\n"
        "I4T2wyy9yB583RDVJNXp4T3Js2XnweNj8pRRsCjxY1lkpSOaLVqAw/1HwK1DOSEG\n"
        "EqW8s37YOPZWGAYIhpfEbD5y960JUjVsuW71w/5cDWkoi1eyeFVbuXg7AoGBAM2i\n"
        "+0A6IrZsy/oIJoF8xmOEH78zEMv8W6Qmfb24N0hdcaFJnM48aF50wk8+/YgMNI7J\n"
        "kKR7JJAIQmIFn8tYji9zeeRny4iAclRb6ecglCspvxLzF7tci3ae1snaOFs2wz6b\n"
        "MkLSfb4nNf2u3dsJ2Z0tU8Tb7pxCDH/yEjCRA4Z3AoGAM/T58jqUFVnlMmWXEfMz\n"
        "puhoz0x6kwNpKDF4kdyFKqwd4eicSNYBpjGV4cAv6Y/8b0WlyU8tDKiHv+0XTn1y\n"
        "VY1a+L307IQtV75x+ef3OE1hPIJ7lu5RlSSqp1vvTTwKYfR2950+4ghIo2TUKcx0\n"
        "3/yO3v6CbdPHOJeDSQC7TycCgYEAq61XyaU/ecGXAaVwUEaVclvKDVxat5J2B7NC\n"
        "4vM65CVvSlIkoWF5WPJtjq9uBvj5oAPTyB4uxji/Awri/2dtPVxQ9UlaeRmTWa5q\n"
        "ttVSHj76EJ32wCthG6U8eMTArBYqJsh2y6bj567gumwVOFse3MQM3ZsnuDjEKsU0\n"
        "Pmuy370CgYAULotjgbSNBZcJu2a2urX+vtjPwpGSsiKYdVHBvspcwWcu8YaaeAde\n"
        "71781PJbFV7v45nT2thc+w9IYemXATH/cOO+JVUMYqZY0c+AOa8bvjnMY5Z6cS6Y\n"
        "WJC6NHVmvvFb1YhXjQz2GA9GGBmx9+5/vaPp4aPp+VMfdt9MkEV/NQ==\n"
        "-----END RSA PRIVATE KEY-----\n"),
    "admin_user": "kubernetes-admin",
    "admin_token": (
        "ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklpMXpXRFZyUkVreFZqQTVUVTVU"
        "UmtOSFVuQTBVSE5PVTNWdlJFaG1RM1ozT1VGMU1UbGZZemhtVFZraWZRLmV5SnBj"
        "M01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmla"
        "WEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlP"
        "aUpyZFdKbExYTjVjM1JsYlNJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpa"
        "V0ZqWTI5MWJuUXZjMlZqY21WMExtNWhiV1VpT2lKcmRXSmxjbTVsZEdWekxXRmti"
        "V2x1TFhSdmEyVnVMVFIyYzNCdElpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJh"
        "V05sWVdOamIzVnVkQzl6WlhKMmFXTmxMV0ZqWTI5MWJuUXVibUZ0WlNJNkltdDFZ"
        "bVZ5Ym1WMFpYTXRZV1J0YVc0aUxDSnJkV0psY201bGRHVnpMbWx2TDNObGNuWnBZ"
        "MlZoWTJOdmRXNTBMM05sY25acFkyVXRZV05qYjNWdWRDNTFhV1FpT2lJMFlURm1a"
        "VEpqTlMweU5qQTJMVFJoWWpRdFlqTXlNUzB5TjJWak1HRXdZVFkyTnpnaUxDSnpk"
        "V0lpT2lKemVYTjBaVzA2YzJWeWRtbGpaV0ZqWTI5MWJuUTZhM1ZpWlMxemVYTjBa"
        "VzA2YTNWaVpYSnVaWFJsY3kxaFpHMXBiaUo5LlhyRU5hNXI5SXRwOGJjM25aMVZo"
        "ZkJlUEFaQ1l2dU5oUVFLYVhNWXlLVjZmQXFiSENIQi1kVnJUYXcxbWs5YXdIQmVz"
        "MXhKUFliVHdzU2dacTZkdFlLYjZuY2RGUUpCYjM2aGJ0NnJ4WnJsZlNYRzFVS2xy"
        "MlQ4ZW1KaFVCV3hFSzVXazRLU1ZobnVBcmJDLUU3MDNTd0hVdEU2UUhDWkRGTWFk"
        "QUoyajJDNmo2RktoLXIwUWpfQ1I4TzBVUTF4c0I0YW9ZS05rUGUxeFJZSVZKUTFW"
        "TjlFdkFaa3lUUFhORDhpUV9hQVFuSlBfUFlCS09OLTAyTnZOY3llVjZ1LWNzdzI3"
        "NVAyYXJIeGdLLXZrMG5Ec1FkTkR5S3hBY2t3Skc3bkVyVmJkNVJoY2JiN2gwX2Jx"
        "dmt4QnJmaEJ5STE4c3k1WFdQTGE4cThIVVE3d092RlpXUQ==")
}

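# These shell-level tests patch the keystone client and endpoint lookup, so
# self.shell(...) exercises argument parsing and output formatting without a
# live API; the manager call itself is mocked per test.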
class KubeClusterTest(test_shell.ShellTest):

    def setUp(self):
        super(KubeClusterTest, self).setUp()

    def tearDown(self):
        super(KubeClusterTest, self).tearDown()

    @mock.patch('cgtsclient.v1.kube_cluster.KubeClusterManager.list')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_cluster_list(self, mock_get_endpoint, mock_get_client,
                               mock_list):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        mock_list.return_value = [KubeCluster(None, FAKE_CLUSTER, True)]
        self.make_env()
        cluster_results = self.shell("kube-cluster-list")
        self.assertIn(FAKE_CLUSTER['cluster_name'], cluster_results)
        self.assertIn(FAKE_CLUSTER['cluster_version'], cluster_results)
        self.assertIn(FAKE_CLUSTER['cluster_api_endpoint'], cluster_results)

    @mock.patch('cgtsclient.v1.kube_cluster.KubeClusterManager.get')
    @mock.patch('cgtsclient.client._get_ksclient')
    @mock.patch('cgtsclient.client._get_endpoint')
    def test_kube_cluster_show(self, mock_get_endpoint, mock_get_client,
                               mock_get):
        mock_get_endpoint.return_value = 'http://fakelocalhost:6385/v1'
        mock_get.return_value = KubeCluster(None, FAKE_CLUSTER, True)
        self.make_env()
        cluster_results = self.shell("kube-cluster-show {}".format(
            FAKE_CLUSTER['cluster_name']))
        self.assertIn(FAKE_CLUSTER['cluster_name'], cluster_results)
        self.assertIn(FAKE_CLUSTER['cluster_version'], cluster_results)
        self.assertIn(FAKE_CLUSTER['cluster_api_endpoint'], cluster_results)

@ -1,5 +1,5 @@
#
-# Copyright (c) 2015 Wind River Systems, Inc.
+# Copyright (c) 2015-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@ -88,10 +88,23 @@ def _get_range_tuples(data):
@utils.arg('--order',
           metavar='<sequential | random>',
           help="The allocation order within the start/end range")
+@utils.arg('--floating-address',
+           metavar='<floating address>',
+           help="The floating address")
+@utils.arg('--controller0-address',
+           metavar='<controller0 address>',
+           help="The address of controller-0")
+@utils.arg('--controller1-address',
+           metavar='<controller1 address>',
+           help="The address of controller-1")
+@utils.arg('--gateway-address',
+           metavar='<gateway address>',
+           help="The gateway address")
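# argparse normalizes the dashed option names above ('--floating-address')
# into the underscored attributes ('floating_address') that field_list below
# filters on.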
def do_addrpool_add(cc, args):
    """Add an IP address pool."""

-    field_list = ['name', 'network', 'prefix', 'order', 'ranges']
+    field_list = ['name', 'network', 'prefix', 'order', 'ranges', 'floating_address',
+                  'controller0_address', 'controller1_address', 'gateway_address']

    # Prune input fields down to required/expected values
    data = dict((k, v) for (k, v) in vars(args).items()

@ -40,7 +40,7 @@ class AppManager(base.Manager):
    def upload(self, data):
        """Stage the specified application, getting it ready for deployment.

-        :param data: application name and location of tarfile
+        :param data: application name and location of tarfile and the binary of the tarfile
        """
        return self._create(self._path(), data)

@ -61,12 +61,13 @@ class AppManager(base.Manager):
        resp, body = self.api.json_request('POST', self._path() + "/update", body=data)
        return self.resource_class(self, body)

-    def remove(self, app_name):
+    def remove(self, app_name, force):
        """Uninstall the specified application

        :param name: app_name
+        :param force: True/False - cli flag/argument
        """
-        return self._update(self._path(app_name) + '?directive=remove',
+        return self._update(self._path(app_name) + '?directive=remove&force=' + str(force),
                            {'values': {}})

    def abort(self, app_name):

@ -77,12 +78,13 @@ class AppManager(base.Manager):
        return self._update(self._path(app_name) + '?directive=abort',
                            {'values': {}})

-    def delete(self, app_name):
+    def delete(self, app_name, force):
        """Delete application data

        :param name: app_name
+        :param force: True/False - cli flag/argument
        """
-        return self._delete(self._path(app_name))
+        return self._delete(self._path(app_name) + '?force=' + str(force))

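# In both remove() and delete() above, the force flag travels as a query
# parameter ('?force=True') appended to the resource path, mirroring how the
# remove directive itself is passed, rather than as a JSON body field.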
def _find_app(cc, app_name):

@ -4,6 +4,7 @@
#
# SPDX-License-Identifier: Apache-2.0
#
+import base64
import os
import re

@ -102,9 +103,28 @@ def do_application_show(cc, args):
@utils.arg('-v', '--app-version',
           metavar='<app version>',
           help='Version of the application')
+@utils.arg('-i', '--images',
+           action='store_true',
+           default=False,
+           help='Save application images in the registry as part of app'
+                ' upload. This option is normally used in the System'
+                ' Controller of a Distributed Cloud system to also upload'
+                ' the application images to the central registry when the'
+                ' app is registered within the system')
def do_application_upload(cc, args):
    """Upload application Helm chart(s) and manifest"""
    data = _application_check(args)
+    if args.images:
+        data.update({'images': True})
+
+    if not _is_url(data["tarfile"]):
+        try:
+            with open(data["tarfile"], 'rb') as tarfile:
+                binary_data = base64.urlsafe_b64encode(tarfile.read())
+                data.update({'binary_data': binary_data})
+        except Exception:
+            raise exc.CommandError("Error: Could not open file %s." % data["tarfile"])

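    # base64.urlsafe_b64encode above turns the raw tarball bytes into ASCII
    # that survives the JSON request body; the API side is expected to decode
    # 'binary_data' back into the tarfile.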
    response = cc.app.upload(data)
    _print_application_show(response)
    _print_reminder_msg(response.name)

@ -162,10 +182,14 @@ def do_application_apply(cc, args):

@utils.arg('name', metavar='<app name>',
           help='Name of the application to be uninstalled')
+@utils.arg('-f', '--force',
+           action='store_true',
+           default=False,
+           help="Force a remove operation")
def do_application_remove(cc, args):
    """Uninstall the application"""
    try:
-        response = cc.app.remove(args.name)
+        response = cc.app.remove(args.name, args.force)
        _print_application_show(response)
        _print_reminder_msg(args.name)
    except exc.HTTPNotFound:

@ -188,10 +212,14 @@ def do_application_abort(cc, args):

@utils.arg('name', metavar='<application name>',
           help='Name of the application to be deleted')
+@utils.arg('-f', '--force',
+           action='store_true',
+           default=False,
+           help="Force a delete operation")
def do_application_delete(cc, args):
    """Remove the uninstalled application from the system"""
    try:
-        cc.app.delete(args.name)
+        cc.app.delete(args.name, args.force)
        print('Application %s deleted.' % args.name)
    except exc.HTTPNotFound:
        raise exc.CommandError('Application not found: %s' % args.name)

@ -15,7 +15,7 @@ from cgtsclient import exc

def _print_certificate_show(certificate):
    fields = ['uuid', 'certtype', 'signature', 'start_date', 'expiry_date']
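    # isinstance() is the flake8-preferred test (E721) and, unlike a type()
    # comparison, also accepts dict subclasses.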
-    if type(certificate) is dict:
+    if isinstance(certificate, dict):
        data = [(f, certificate.get(f, '')) for f in fields]
        details = ('details', certificate.get('details', ''))
    else:

@ -1,4 +1,4 @@
-# Copyright 2012 OpenStack LLC.
+# Copyright 2012-2020 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-# Copyright (c) 2013-2020 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
#

@ -35,7 +35,6 @@ from cgtsclient.v1 import fernet
from cgtsclient.v1 import health
from cgtsclient.v1 import helm
from cgtsclient.v1 import host_fs
-from cgtsclient.v1 import icommunity
from cgtsclient.v1 import icpu
from cgtsclient.v1 import idisk
from cgtsclient.v1 import idns

@ -54,8 +53,8 @@ from cgtsclient.v1 import isensor
from cgtsclient.v1 import isensorgroup
from cgtsclient.v1 import istor
from cgtsclient.v1 import isystem
-from cgtsclient.v1 import itrapdest
from cgtsclient.v1 import iuser
+from cgtsclient.v1 import kube_cluster
from cgtsclient.v1 import kube_host_upgrade
from cgtsclient.v1 import kube_upgrade
from cgtsclient.v1 import kube_version

@ -81,6 +80,7 @@ from cgtsclient.v1 import sm_servicegroup
from cgtsclient.v1 import storage_backend
from cgtsclient.v1 import storage_ceph
from cgtsclient.v1 import storage_ceph_external
+from cgtsclient.v1 import storage_ceph_rook
from cgtsclient.v1 import storage_external
from cgtsclient.v1 import storage_file
from cgtsclient.v1 import storage_lvm

@ -124,11 +124,10 @@ class Client(http.HTTPClient):
        self.storage_file = storage_file.StorageFileManager(self)
        self.storage_external = storage_external.StorageExternalManager(self)
        self.storage_ceph = storage_ceph.StorageCephManager(self)
+        self.storage_ceph_rook = storage_ceph_rook.StorageCephRookManager(self)
        self.ceph_mon = ceph_mon.CephMonManager(self)
        self.drbdconfig = drbdconfig.drbdconfigManager(self)
        self.iprofile = iprofile.iprofileManager(self)
-        self.icommunity = icommunity.iCommunityManager(self)
-        self.itrapdest = itrapdest.iTrapdestManager(self)
        self.port = port.PortManager(self)
        self.ethernet_port = ethernetport.EthernetPortManager(self)
        self.address = address.AddressManager(self)

@ -166,6 +165,7 @@ class Client(http.HTTPClient):
        self.fernet = fernet.FernetManager(self)
        self.app = app.AppManager(self)
        self.host_fs = host_fs.HostFsManager(self)
+        self.kube_cluster = kube_cluster.KubeClusterManager(self)
        self.kube_version = kube_version.KubeVersionManager(self)
        self.kube_upgrade = kube_upgrade.KubeUpgradeManager(self)
        self.kube_host_upgrade = kube_host_upgrade.KubeHostUpgradeManager(self)

@ -15,7 +15,7 @@ def _print_device_image_show(obj):
          'name', 'description', 'image_version',
          'applied', 'applied_labels']

-    if type(obj) is dict:
+    if isinstance(obj, dict):
        data = [(f, obj.get(f, '')) for f in fields]
    else:
        data = [(f, getattr(obj, f, '')) for f in fields]

@ -158,7 +158,7 @@ def do_kube_host_upgrade_list(cc, args):
           help='Hostname of the host')
@utils.arg('-p', '--personality',
           metavar='<personality>',
-           choices=['controller', 'worker', 'storage', 'network', 'profile'],
+           choices=['controller', 'worker', 'edgeworker', 'storage', 'network', 'profile'],
           help='Personality or type of host [REQUIRED]')
@utils.arg('-s', '--subfunctions',
           metavar='<subfunctions>',

@ -1,53 +0,0 @@
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


# -*- encoding: utf-8 -*-
#
#

from cgtsclient.common import base
from cgtsclient import exc


CREATION_ATTRIBUTES = ['community']


class iCommunity(base.Resource):
    def __repr__(self):
        return "<iCommunity %s>" % self._info


class iCommunityManager(base.Manager):
    resource_class = iCommunity

    @staticmethod
    def _path(id=None):
        return '/v1/icommunity/%s' % id if id else '/v1/icommunity'

    def list(self):
        return self._list(self._path(), "icommunity")

    def get(self, iid):
        try:
            return self._list(self._path(iid))[0]
        except IndexError:
            return None

    def create(self, **kwargs):
        new = {}
        for (key, value) in kwargs.items():
            if key in CREATION_ATTRIBUTES:
                new[key] = value
            else:
                raise exc.InvalidAttribute()
        return self._create(self._path(), new)

    def delete(self, iid):
        return self._delete(self._path(iid))

    def update(self, iid, patch):
        return self._update(self._path(iid), patch)

@ -1,79 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#


from cgtsclient.common import utils
from cgtsclient import exc


def _print_icommunity_show(icommunity):
    fields = ['uuid', 'community', 'view', 'access', 'created_at']
    data = dict([(f, getattr(icommunity, f, '')) for f in fields])
    utils.print_dict(data, wrap=72)


def do_snmp_comm_list(cc, args):
    """List community strings."""
    icommunity = cc.icommunity.list()
    field_labels = ['SNMP community', 'View', 'Access']
    fields = ['community', 'view', 'access']
    utils.print_list(icommunity, fields, field_labels, sortby=1)


@utils.arg('icommunity', metavar='<community>', help="Name of icommunity")
def do_snmp_comm_show(cc, args):
    """Show SNMP community attributes."""
    try:
        icommunity = cc.icommunity.get(args.icommunity)
    except exc.HTTPNotFound:
        raise exc.CommandError('service not found: %s' % args.icommunity)
    else:
        _print_icommunity_show(icommunity)


@utils.arg('-c', '--community',
           metavar='<community>',
           help='SNMP community string [REQUIRED]')
def do_snmp_comm_add(cc, args):
    """Add a new SNMP community."""
    field_list = ['community', 'view', 'access']
    fields = dict((k, v) for (k, v) in vars(args).items()
                  if k in field_list and not (v is None))
    # fields = utils.args_array_to_dict(fields, 'activity')
    # fields = utils.args_array_to_dict(fields, 'reason')
    icommunity = cc.icommunity.create(**fields)

    field_list.append('uuid')
    data = dict([(f, getattr(icommunity, f, '')) for f in field_list])
    utils.print_dict(data, wrap=72)


@utils.arg('icommunity',
           metavar='<icommunity name>',
           nargs='+',
           help="Name of icommunity")
def do_snmp_comm_delete(cc, args):
    """Delete an SNMP community."""
    for c in args.icommunity:
        try:
            cc.icommunity.delete(c)
        except exc.HTTPNotFound:
            raise exc.CommandError('Community not found: %s' % c)
        print('Deleted community %s' % c)

@ -90,6 +90,9 @@ def do_host_cpu_list(cc, args):
           choices=['vswitch', 'shared', 'platform', 'application-isolated'],
           required=True,
           help='The Core Function.')
+@utils.arg('-c', '--cpulist',
+           metavar='<cpulist>',
+           help="List of cpus, mutually exclusive with the -pX options")
@utils.arg('-p0', '--num_cores_on_processor0',
           metavar='<num_cores_on_processor0>',
           type=int,

@ -108,7 +111,7 @@ def do_host_cpu_list(cc, args):
           help='Number of cores on Processor 3.')
def do_host_cpu_modify(cc, args):
    """Modify cpu core assignments."""
-    field_list = ['function', 'allocated_function',
+    field_list = ['function', 'allocated_function', 'cpulist',
                  'num_cores_on_processor0', 'num_cores_on_processor1',
                  'num_cores_on_processor2', 'num_cores_on_processor3']

@ -119,18 +122,25 @@ def do_host_cpu_modify(cc, args):
                                 if k in field_list and not (v is None))

    cap = {'function': user_specified_fields.get('function')}
+    cpulist = user_specified_fields.get('cpulist')
+    if cpulist:
+        cap['cpulist'] = cpulist

    for k, v in user_specified_fields.items():
        if k.startswith('num_cores_on_processor'):
            sockets.append({k.lstrip('num_cores_on_processor'): v})

+    # can't specify both the -c option and any of the -pX options
+    if sockets and cpulist:
+        raise exc.CommandError('Not allowed to specify both -c and -pX options.')
+
    if sockets:
        cap.update({'sockets': sockets})
-        capabilities.append(cap)
-    else:
+    elif not cpulist:
        raise exc.CommandError('Number of cores on Processor (Socket) '
                               'not provided.')

+    capabilities.append(cap)
    icpus = cc.ihost.host_cpus_modify(ihost.uuid, capabilities)

    field_labels = ['uuid', 'log_core', 'processor', 'phy_core', 'thread',

@ -90,6 +90,11 @@ def do_host_disk_list(cc, args):
@utils.arg('device_name_path_uuid',
           metavar='<device name or path or UUID>',
           help='Name or uuid of disk on the host [REQUIRED]')
+@utils.arg('-s', '--skip_formatting',
+           action='store_true',
+           default=False,
+           help='Wipe the current partition information but do not add a new'
+                ' partition table.')
@utils.arg('--confirm',
           action='store_true',
           default=False,

@ -118,6 +123,7 @@ def do_host_disk_wipe(cc, args):

    fields = dict()
    fields['partition_table'] = constants.PARTITION_TABLE_GPT
+    fields['skip_formatting'] = 'True' if args.skip_formatting else 'False'
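    # The boolean is serialized as the strings 'True'/'False' because every
    # field in the patch built below is carried as a string value.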

    patch = []
    for (k, v) in fields.items():

@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2015 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@ -15,9 +15,9 @@ from cgtsclient.v1 import port
CREATION_ATTRIBUTES = ['ifname', 'iftype', 'ihost_uuid', 'imtu', 'ifclass',
                       'networks', 'network_uuid', 'networktype', 'aemode', 'txhashpolicy',
                       'providernetworks', 'datanetworks', 'ifcapabilities', 'ports', 'imac',
-                       'vlan_id', 'uses', 'used_by',
+                       'vlan_id', 'uses', 'used_by', 'primary_reselect',
                       'ipv4_mode', 'ipv6_mode', 'ipv4_pool', 'ipv6_pool',
-                       'sriov_numvfs', 'sriov_vf_driver', 'ptp_role']
+                       'sriov_numvfs', 'sriov_vf_driver', 'ptp_role', 'max_tx_rate']


class iinterface(base.Resource):

@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2014 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@ -17,10 +17,11 @@ from cgtsclient.v1 import iinterface as iinterface_utils
def _print_iinterface_show(cc, iinterface):
    fields = ['ifname', 'iftype', 'ports',
              'imac', 'imtu', 'ifclass', 'ptp_role',
-              'aemode', 'schedpolicy', 'txhashpolicy',
+              'aemode', 'schedpolicy', 'txhashpolicy', 'primary_reselect',
              'uuid', 'ihost_uuid',
              'vlan_id', 'uses', 'used_by',
-              'created_at', 'updated_at', 'sriov_numvfs', 'sriov_vf_driver']
+              'created_at', 'updated_at', 'sriov_numvfs',
+              'sriov_vf_driver', 'max_tx_rate']
    optional_fields = ['ipv4_mode', 'ipv6_mode', 'ipv4_pool', 'ipv6_pool']
    rename_fields = [{'field': 'dpdksupport', 'label': 'accelerated'}]
    data = [(f, getattr(iinterface, f, '')) for f in fields]

@ -79,11 +80,16 @@ def do_host_if_list(cc, args):
            if i.aemode in ['balanced', '802.3ad']:
                attr_str = "%s,AE_XMIT_POLICY=%s" % (
                    attr_str, i.txhashpolicy)
+            if i.aemode == 'active_standby' and i.primary_reselect:
+                attr_str = "%s,primary_reselect=%s" % (
+                    attr_str, i.primary_reselect)
        if i.ifclass and i.ifclass == 'data':
            if False in i.dpdksupport:
                attr_str = "%s,accelerated=False" % attr_str
            else:
                attr_str = "%s,accelerated=True" % attr_str
+        if i.max_tx_rate:
+            attr_str = "%s,max_tx_rate=%s" % (attr_str, i.max_tx_rate)
        setattr(i, 'attrs', attr_str)

    field_labels = ['uuid', 'name', 'class', 'type', 'vlan id', 'ports',

@ -116,7 +122,7 @@ def do_host_if_delete(cc, args):
           help="Name of interface [REQUIRED]")
@utils.arg('iftype',
           metavar='<iftype>',
-           choices=['ae', 'vlan', 'virtual', 'vf'],
+           choices=['ae', 'vlan', 'virtual', 'vf', 'ethernet'],
           nargs='?',
           help="Type of the interface")
@utils.arg('-a', '--aemode',

@ -169,13 +175,22 @@ def do_host_if_delete(cc, args):
           metavar='<ptp role>',
           choices=['master', 'slave', 'none'],
           help='The PTP role for this interface')
+@utils.arg('-r', '--max-tx-rate',
+           dest='max_tx_rate',
+           metavar='<max_tx_rate>',
+           help='The max tx rate (Mb/s) of the SR-IOV VF interface')
+@utils.arg('--primary-reselect',
+           dest='primary_reselect',
+           metavar='<primary reselect>',
+           choices=['always', 'better', 'failure'],
+           help='The reselection policy for active standby bonded interface (always, better, failure)')
def do_host_if_add(cc, args):
    """Add an interface."""

    field_list = ['ifname', 'iftype', 'imtu', 'ifclass', 'aemode',
-                  'txhashpolicy', 'vlan_id', 'ptp_role',
+                  'txhashpolicy', 'vlan_id', 'ptp_role', 'primary_reselect',
                  'ipv4_mode', 'ipv6_mode', 'ipv4_pool', 'ipv6_pool',
-                  'sriov_numvfs', 'sriov_vf_driver']
+                  'sriov_numvfs', 'sriov_vf_driver', 'max_tx_rate']

    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)

|
@ -192,6 +207,9 @@ def do_host_if_add(cc, args):
|
|||
elif args.iftype == 'vf':
|
||||
uses = args.portsorifaces
|
||||
portnamesoruuids = None
|
||||
elif args.iftype == 'ethernet':
|
||||
uses = args.portsorifaces
|
||||
portnamesoruuids = None
|
||||
else:
|
||||
uses = None
|
||||
portnamesoruuids = ','.join(args.portsorifaces)
|
||||
|
@ -264,13 +282,22 @@ def do_host_if_add(cc, args):
           metavar='<ptp role>',
           choices=['master', 'slave', 'none'],
           help='The PTP role for this interface')
+@utils.arg('-r', '--max-tx-rate',
+           dest='max_tx_rate',
+           metavar='<max_tx_rate>',
+           help='The max tx rate (Mb/s) of the VF interface')
+@utils.arg('--primary-reselect',
+           dest='primary_reselect',
+           metavar='<primary reselect>',
+           choices=['always', 'better', 'failure'],
+           help='The reselection policy for active standby bonded interface (always, better, failure)')
def do_host_if_modify(cc, args):
    """Modify interface attributes."""

    rwfields = ['iftype', 'ifname', 'imtu', 'aemode', 'txhashpolicy',
-                'ports', 'ifclass', 'ptp_role',
+                'ports', 'ifclass', 'ptp_role', 'primary_reselect',
                'ipv4_mode', 'ipv6_mode', 'ipv4_pool', 'ipv6_pool',
-                'sriov_numvfs', 'sriov_vf_driver']
+                'sriov_numvfs', 'sriov_vf_driver', 'max_tx_rate']

    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
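    # Example CLI usage of the new options (host and interface names
    # illustrative):
    #   system host-if-modify controller-0 sriov0 -r 1000
    #   system host-if-modify controller-0 bond0 --primary-reselect failure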

@ -1,5 +1,5 @@
#
-# Copyright (c) 2013-2017 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@ -17,9 +17,9 @@ from six.moves import input

def _print_isystem_show(isystem):
    fields = ['name', 'system_type', 'system_mode', 'description', 'location',
-              'contact', 'timezone', 'software_version', 'uuid',
-              'created_at', 'updated_at', 'region_name', 'service_project_name',
-              'security_feature']
+              'latitude', 'longitude', 'contact', 'timezone', 'software_version',
+              'uuid', 'created_at', 'updated_at', 'region_name',
+              'service_project_name', 'security_feature']
    if isystem.capabilities.get('region_config'):
        fields.append('shared_services')
        setattr(isystem, 'shared_services',

@ -76,6 +76,12 @@ def do_show(cc, args):
@utils.arg('-l', '--location',
           metavar='<location>',
           help='The location of the system')
+@utils.arg('-la', '--latitude',
+           metavar='<latitude>',
+           help='The latitude GEO location coordinate of the system')
+@utils.arg('-lo', '--longitude',
+           metavar='<longitude>',
+           help='The longitude GEO location coordinate of the system')
@utils.arg('-p', '--https_enabled',
           metavar='<https_enabled>',
           choices=['true', 'false', 'True', 'False'],

@ -102,10 +108,6 @@ def do_modify(cc, args):
        if isystem.system_type != constants.TS_AIO:
            raise exc.CommandError("system_mode can only be modified on an "
                                   "AIO system")
-        if isystem.system_mode == constants.SYSTEM_MODE_SIMPLEX:
-            raise exc.CommandError("system_mode can not be modified if it is "
-                                   "currently set to '%s'" %
-                                   constants.SYSTEM_MODE_SIMPLEX)
        mode = args.system_mode
        if isystem.system_mode == mode:
            raise exc.CommandError("system_mode value already set to '%s'" %

@ -134,8 +136,9 @@ def do_modify(cc, args):
        return
    print('Please follow the admin guide to complete the reconfiguration.')

-    field_list = ['name', 'system_mode', 'description', 'location', 'contact',
-                  'timezone', 'sdn_enabled', 'https_enabled', 'vswitch_type', 'security_feature']
+    field_list = ['name', 'system_mode', 'description', 'location', 'latitude',
+                  'longitude', 'contact', 'timezone', 'sdn_enabled',
+                  'https_enabled', 'vswitch_type', 'security_feature']

    # use field list as filter
    user_fields = dict((k, v) for (k, v) in vars(args).items()

@ -152,6 +155,20 @@ def do_modify(cc, args):
        if k == "https_enabled" and v == "true":
            print_https_warning = True

+    # If there is an existing ssl or tpm certificate in system, it will
+    # be used instead of installing the default self signed certificate.
+    if print_https_warning:
+        certificates = cc.certificate.list()
+        for certificate in certificates:
+            if certificate.certtype in ['ssl', 'tpm_mode']:
+                warning = ("Existing certificate %s is used for https."
+                           % certificate.uuid)
+                break
+        else:
+            warning = "HTTPS enabled with a self-signed certificate.\nThis " \
+                      "should be changed to a CA-signed certificate with " \
+                      "'system certificate-install'. "
+
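    # Python's for/else: the else branch runs only when the loop finishes
    # without hitting the break, i.e. when no ssl/tpm certificate was found.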
    try:
        isystem = cc.isystem.update(isystem.uuid, patch)
    except exc.HTTPNotFound:

@ -159,5 +176,4 @@ def do_modify(cc, args):
    _print_isystem_show(isystem)

    if print_https_warning:
-        print("HTTPS enabled with a self-signed certificate.\nThis should be "
-              "changed to a CA-signed certificate with 'system certificate-install'. ")
+        print(warning)

@ -1,53 +0,0 @@
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#


# -*- encoding: utf-8 -*-
#
#

from cgtsclient.common import base
from cgtsclient import exc


CREATION_ATTRIBUTES = ['ip_address', 'community']


class iTrapdest(base.Resource):
    def __repr__(self):
        return "<iTrapdest %s>" % self._info


class iTrapdestManager(base.Manager):
    resource_class = iTrapdest

    @staticmethod
    def _path(id=None):
        return '/v1/itrapdest/%s' % id if id else '/v1/itrapdest'

    def list(self):
        return self._list(self._path(), "itrapdest")

    def get(self, iid):
        try:
            return self._list(self._path(iid))[0]
        except IndexError:
            return None

    def create(self, **kwargs):
        new = {}
        for (key, value) in kwargs.items():
            if key in CREATION_ATTRIBUTES:
                new[key] = value
            else:
                raise exc.InvalidAttribute()
        return self._create(self._path(), new)

    def delete(self, iid):
        return self._delete(self._path(iid))

    def update(self, iid, patch):
        return self._update(self._path(iid), patch)

@ -1,83 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#


from cgtsclient.common import utils
from cgtsclient import exc


def _print_itrapdest_show(itrapdest):
    fields = ['uuid', 'ip_address', 'community', 'port', 'type',
              'transport', 'created_at']
    data = dict([(f, getattr(itrapdest, f, '')) for f in fields])
    utils.print_dict(data, wrap=72)


def do_snmp_trapdest_list(cc, args):
    """List SNMP trap destinations."""
    itrapdest = cc.itrapdest.list()
    field_labels = ['IP Address', 'SNMP Community', 'Port', 'Type', 'Transport']
    fields = ['ip_address', 'community', 'port', 'type', 'transport']
    utils.print_list(itrapdest, fields, field_labels, sortby=1)


@utils.arg('itrapdest', metavar='<ip_address>', help="IP address of itrapdest")
def do_snmp_trapdest_show(cc, args):
    """Show a SNMP trap destination."""
    try:
        itrapdest = cc.itrapdest.get(args.itrapdest)
    except exc.HTTPNotFound:
        raise exc.CommandError('Trap Destination not found: %s' % args.itrapdest)
    else:
        _print_itrapdest_show(itrapdest)


@utils.arg('-i', '--ip_address',
           metavar='<ip_address>',
           help='IP address of the trap destination [REQUIRED]')
@utils.arg('-c', '--community',
           metavar='<community>',
           help='SNMP community string [REQUIRED]')
def do_snmp_trapdest_add(cc, args):
    """Create a new SNMP trap destination."""
    field_list = ['ip_address', 'community', 'port', 'type', 'transport']
    fields = dict((k, v) for (k, v) in vars(args).items()
                  if k in field_list and not (v is None))
    # fields = utils.args_array_to_dict(fields, 'activity')
    # fields = utils.args_array_to_dict(fields, 'reason')
    itrapdest = cc.itrapdest.create(**fields)

    field_list.append('uuid')
    data = dict([(f, getattr(itrapdest, f, '')) for f in field_list])
    utils.print_dict(data, wrap=72)


@utils.arg('itrapdest',
           metavar='<ip_address>',
           nargs='+',
           help="IP Address of itrapdest")
def do_snmp_trapdest_delete(cc, args):
    """Delete an SNMP trap destination."""
    for c in args.itrapdest:
        try:
            cc.itrapdest.delete(c)
        except exc.HTTPNotFound:
            raise exc.CommandError('IP not found: %s' % c)
        print('Deleted ip %s' % c)

@@ -0,0 +1,35 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from cgtsclient.common import base


class KubeCluster(base.Resource):
    def __repr__(self):
        return "<kube_cluster %s>" % self._info


class KubeClusterManager(base.Manager):
    resource_class = KubeCluster

    @staticmethod
    def _path(name=None):
        return '/v1/kube_clusters/%s' % name if name else '/v1/kube_clusters'

    def list(self):
        """Retrieve the list of kubernetes clusters known to the system."""
        return self._list(self._path(), 'kube_clusters')

    def get(self, name):
        """Retrieve the details of a given kubernetes cluster

        :param name: kubernetes cluster name
        """
        try:
            return self._list(self._path(name))[0]
        except IndexError:
            return None
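For orientation, a minimal sketch of how this manager is reached from a cgtsclient handle. The `cc` handle and the cluster name used below are illustrative assumptions, not part of the change; the endpoints in the comments come from `_path()` above.

# Illustrative only: assumes an already-constructed cgtsclient handle `cc`
# whose `kube_cluster` attribute is a KubeClusterManager instance.
clusters = cc.kube_cluster.list()           # GET /v1/kube_clusters
for cluster in clusters:
    print(cluster.cluster_name)

detail = cc.kube_cluster.get('kubernetes')  # GET /v1/kube_clusters/kubernetes
if detail is None:
    print('no such cluster')                # get() returns None on IndexError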
@@ -0,0 +1,34 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from cgtsclient.common import utils
from cgtsclient import exc
from collections import OrderedDict


def _print_kube_cluster_show(kube_cluster):
    ordereddata = OrderedDict(sorted(kube_cluster.to_dict().items(),
                                     key=lambda t: t[0]))
    utils.print_dict(ordereddata, wrap=72)


def do_kube_cluster_list(cc, args):
    """List all kubernetes clusters"""
    versions = cc.kube_cluster.list()
    fields = ['cluster_name', 'cluster_version', 'cluster_api_endpoint']
    labels = fields
    utils.print_list(versions, fields, labels, sortby=0)


@utils.arg('name', metavar="<cluster-name>",
           help="Kubernetes cluster name", default=None)
def do_kube_cluster_show(cc, args):
    """Show kubernetes cluster details"""
    try:
        name = cc.kube_cluster.get(args.name)
        _print_kube_cluster_show(name)
    except exc.HTTPNotFound:
        raise exc.CommandError('kubernetes cluster not found: %s' % args.name)
@@ -8,8 +8,25 @@ from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ihost as ihost_utils

# PCI Device Class ID in hexadecimal string
PCI_DEVICE_CLASS_FPGA = '120000'
# Account for those accelerator cards with a progIF set.
# PCI Device Class ID in hexadecimal string.


class pci_device_class_acclr:
    def __init__(self):
        self.pci_class_ids = ['120000', '120001']

    def __eq__(self, other):
        return (other in self.pci_class_ids)

    def __ne__(self, other):
        return (other not in self.pci_class_ids)

    def __str__(self):
        return ' '.join(self.pci_class_ids)


PCI_DEVICE_CLASS_FPGA = pci_device_class_acclr()


def _print_device_show(device):
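The containment-based `__eq__`/`__ne__` above lets existing equality checks against PCI_DEVICE_CLASS_FPGA match either accelerator class ID without touching the call sites. A small behaviour sketch (the '030000' value is just an illustrative non-matching class):

# Behaviour sketch for pci_device_class_acclr, using the IDs from the class above.
acclr = pci_device_class_acclr()
assert acclr == '120000'        # FPGA class without a progIF
assert acclr == '120001'        # FPGA class with a progIF set
assert acclr != '030000'        # e.g. a display controller does not match
assert str(acclr) == '120000 120001'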
@@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2020 Wind River Systems, Inc.
# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -23,7 +23,6 @@ from cgtsclient.v1 import health_shell
from cgtsclient.v1 import helm_shell
from cgtsclient.v1 import host_fs_shell

from cgtsclient.v1 import icommunity_shell
from cgtsclient.v1 import icpu_shell
from cgtsclient.v1 import idisk_shell
from cgtsclient.v1 import idns_shell

@@ -41,9 +40,9 @@ from cgtsclient.v1 import isensor_shell
from cgtsclient.v1 import isensorgroup_shell
from cgtsclient.v1 import istor_shell
from cgtsclient.v1 import isystem_shell
from cgtsclient.v1 import itrapdest_shell
from cgtsclient.v1 import iuser_shell

from cgtsclient.v1 import kube_cluster_shell
from cgtsclient.v1 import kube_upgrade_shell
from cgtsclient.v1 import kube_version_shell
from cgtsclient.v1 import label_shell
@@ -93,8 +92,6 @@ COMMAND_MODULES = [
    sm_service_nodes_shell,
    sm_servicegroup_shell,
    sm_service_shell,
    icommunity_shell,
    itrapdest_shell,
    ethernetport_shell,
    port_shell,
    address_shell,

@@ -125,6 +122,7 @@ COMMAND_MODULES = [
    label_shell,
    app_shell,
    host_fs_shell,
    kube_cluster_shell,
    kube_version_shell,
    kube_upgrade_shell,
    device_image_shell,
@@ -236,5 +236,5 @@ def backend_delete(cc, backend_name_or_uuid):
        raise exc.CommandError("Backend %s is not found."
                               % backend_name_or_uuid)

    backend_client = getattr(cc, 'storage_' + db_backend.backend)
    backend_client = getattr(cc, 'storage_' + db_backend.backend.replace("-", "_"))
    backend_client.delete(db_backend.uuid)
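The `.replace("-", "_")` matters because client manager attributes are valid Python identifiers while backend names may contain hyphens; e.g. a ceph-rook backend must resolve to the `storage_ceph_rook` manager added later in this change. Roughly:

# Sketch: mapping a backend name to its client manager attribute.
backend = 'ceph-rook'                       # as stored on the backend object
attr = 'storage_' + backend.replace('-', '_')
assert attr == 'storage_ceph_rook'          # getattr(cc, attr) now succeeds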
@@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -10,6 +10,7 @@

import argparse

from cgtsclient.common import constants
from cgtsclient.common import utils
from cgtsclient.v1 import storage_backend as storage_backend_utils

@@ -57,7 +58,7 @@ def do_storage_backend_show(cc, args):

@utils.arg('backend',
           metavar='<backend>',
           choices=['ceph', 'ceph-external', 'file', 'lvm', 'external'],
           choices=['ceph', 'ceph-external', 'file', 'lvm', 'external', 'ceph-rook'],
           help='The storage backend to add [REQUIRED]')
@utils.arg('-s', '--services',
           metavar='<services>',

@@ -88,6 +89,10 @@ def do_storage_backend_show(cc, args):
@utils.arg('--ceph-mon-gib',
           metavar='<ceph-mon-gib>',
           help='The ceph-mon-lv size in GiB')
@utils.arg('--network',
           metavar='<network>',
           choices=constants.SB_SUPPORTED_NETWORKS[constants.SB_TYPE_CEPH],
           help='Desired network to be used by the backend.')
def do_storage_backend_add(cc, args):
    """Add a storage backend."""
@@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2018 Wind River Systems, Inc.
# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -13,14 +13,14 @@ from cgtsclient import exc
CREATION_ATTRIBUTES = ['confirmed', 'name', 'services', 'capabilities',
                       'tier_uuid', 'cinder_pool_gib', 'glance_pool_gib',
                       'ephemeral_pool_gib', 'object_pool_gib',
                       'kube_pool_gib', 'object_gateway']
                       'kube_pool_gib', 'object_gateway', 'network']
DISPLAY_ATTRIBUTES = ['object_gateway', 'ceph_total_space_gib',
                      'object_pool_gib', 'cinder_pool_gib',
                      'kube_pool_gib', 'glance_pool_gib', 'ephemeral_pool_gib',
                      'tier_name', 'tier_uuid']
                      'tier_name', 'tier_uuid', 'network']
PATCH_ATTRIBUTES = ['object_gateway', 'object_pool_gib',
                    'cinder_pool_gib', 'glance_pool_gib',
                    'ephemeral_pool_gib', 'kube_pool_gib']
                    'ephemeral_pool_gib', 'kube_pool_gib', 'network']


class StorageCeph(base.Resource):
@@ -0,0 +1,55 @@
#
# Copyright (c) 2020 Intel Corporation, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

# -*- encoding: utf-8 -*-
#

from cgtsclient.common import base
from cgtsclient import exc

CREATION_ATTRIBUTES = ['confirmed', 'name', 'services', 'capabilities']
DISPLAY_ATTRIBUTES = []
PATCH_ATTRIBUTES = []


class StorageCephRook(base.Resource):
    def __repr__(self):
        return "<storage_ceph_rook %s>" % self._info


class StorageCephRookManager(base.Manager):
    resource_class = StorageCephRook

    @staticmethod
    def _path(id=None):
        return '/v1/storage_ceph_rook/%s' % id if id else '/v1/storage_ceph_rook'

    def list(self):
        return self._list(self._path(), "storage_ceph_rook")

    def get(self, stor_id=None):
        try:
            if stor_id:
                return self._list(self._path(stor_id))[0]
            else:
                return self._list(self._path(), "storage_ceph_rook")[0]
        except IndexError:
            return None

    def create(self, **kwargs):
        new = {}
        for (key, value) in kwargs.items():
            if key in CREATION_ATTRIBUTES:
                new[key] = value
            else:
                raise exc.InvalidAttribute('%s' % key)
        return self._create(self._path(), new)

    def delete(self, stor_id):
        return self._delete(self._path(stor_id))

    def update(self, stor_id, patch):
        return self._update(self._path(stor_id), patch)
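A brief sketch of the manager's surface, mirroring the other storage backends. The `cc` handle and the keyword values below are illustrative assumptions; only the attribute names come from CREATION_ATTRIBUTES above.

# Illustrative only: `cc` is an existing cgtsclient handle.
backend = cc.storage_ceph_rook.create(confirmed=True,
                                      name='ceph-rook-store',  # hypothetical name
                                      services='block')         # hypothetical value
backends = cc.storage_ceph_rook.list()   # GET /v1/storage_ceph_rook
first = cc.storage_ceph_rook.get()       # first backend, or None if none exist
# An unknown keyword raises exc.InvalidAttribute, naming the offending key.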
@@ -2,5 +2,7 @@ keyring
oslo.i18n  # Apache-2.0
oslo.serialization>=1.10.0,!=2.19.1  # Apache-2.0
oslo.utils>=3.5.0  # Apache-2.0
six>=1.10.0  # MIT
prettytable<0.8,>=0.7.2  # BSD
requests-toolbelt
python-dateutil
@@ -1,6 +1,6 @@
#! /bin/sh
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -29,6 +29,7 @@ SYSINV_CONF_DIR="/etc/sysinv"
SYSINV_CONF_FILE="${SYSINV_CONF_DIR}/sysinv.conf"
SYSINV_CONF_DEFAULT_FILE="/opt/platform/sysinv/${SW_VERSION}/sysinv.conf.default"
SYSINV_READY_FLAG=/var/run/.sysinv_ready
SYSINV_REPORTED_FLAG=/var/run/sysinv/.sysinv_reported

DELAY_SEC=20
@@ -62,11 +63,11 @@ function mount_and_copy_config_file()
    if [ ${RETVAL} -ne 0 ] ; then
        logger "$0: Warn: nfs-mount controller:/opt/platform/sysinv/${SW_VERSION} /mnt/sysinv"
    else
        mkdir -p $SYSINV_CONF_DIR
        cp /mnt/sysinv/sysinv.conf.default ${SYSINV_CONF_FILE}
        RETVAL=$?
        if [ ${RETVAL} -ne 0 ] ; then
            logger "$0: Warn: cp /mnt/sysinv/sysinv.conf.default ${SYSINV_CONF_FILE}"
        fi
        timeout 5s umount /mnt/sysinv
        rmdir /mnt/sysinv
@@ -84,18 +85,20 @@ case "$1" in
        exit 1
    fi

#   if [ "$NODETYPE" = "compute" ] ; then
#   if [ "$NODETYPE" = "compute" ] || [ "$NODETYPE" = "controller" ] ; then
    echo -n "Setting up config for sysinv-agent: "
    if [ -e ${SYSINV_READY_FLAG} ] ; then
        # clear it on every restart, so agent can update it
        rm -f ${SYSINV_READY_FLAG}
    fi

    if [ -e ${SYSINV_REPORTED_FLAG} ] ; then
        # clear it on every restart, so agent can update it
        rm -f ${SYSINV_REPORTED_FLAG}
    fi

    if [ -f ${SYSINV_CONF_FILE} ] ; then
        logger "$0: ${SYSINV_CONF_FILE} already exists"
        RETVAL=0
    else
        # Avoid self-mount due to potential nfs issues
        echo "Checking for controller-platform-nfs "
@@ -120,20 +123,20 @@ case "$1" in
            echo "controller-platform-nfs is not available"
        else
            # Only required if conf file does not already exist
            if [ -f ${SYSINV_CONF_DEFAULT_FILE} ]
            then
                echo "Copying self sysinv.conf without mount"
                mkdir -p $SYSINV_CONF_DIR
                cp ${SYSINV_CONF_DEFAULT_FILE} ${SYSINV_CONF_FILE}
                RETVAL=$?
                if [ ${RETVAL} -ne 0 ] ; then
                    logger "$0: Warn: cp /mnt/sysinv/sysinv.conf.default ${SYSINV_CONF_FILE} failed. Try mount."
                else
                    CONF_COPIED=1
                fi
            fi
            if [ ${CONF_COPIED} -eq 0 ]
            then
                CONF_COPY_COUNT=0
                while [ $CONF_COPY_COUNT -lt 3 ]; do
                    if mount_and_copy_config_file ;
@@ -144,52 +147,48 @@ case "$1" in
                    let CONF_COPY_COUNT=CONF_COPY_COUNT+1
                    logger "$0: Warn: Mount and copy config file failed. Attempt: ${CONF_COPY_COUNT}"
                done
            fi
        fi
    fi

    echo -n "Installing virtio_net driver: "
    timeout 5s modprobe virtio_net
    RETVAL=$?
    if [ ${RETVAL} -eq 0 ] ; then
        echo "OK"
    else
        echo "FAIL"
    fi

    if [ -e ${daemon_pidfile} ] ; then
        echo "Killing existing process before starting new"
        pid=`cat ${daemon_pidfile}`
        kill -TERM $pid
        rm -f ${daemon_pidfile}
    fi

    echo -n "Starting sysinv-agent: "
    /bin/sh -c "${SYSINVAGENT}"' >> /dev/null 2>&1 & echo $!' > ${daemon_pidfile}
    RETVAL=$?
    if [ $RETVAL -eq 0 ] ; then
        echo "OK"
        touch /var/lock/subsys/${DAEMON_NAME}
    else
        echo "FAIL"
    fi
#   fi
    ;;

  stop)
#   if [ "$NODETYPE" = "compute" ] ; then
#   if [ "$NODETYPE" = "compute" ] || [ "$NODETYPE" = "controller" ] ; then
    echo -n "Stopping sysinv-agent: "
    if [ -e ${daemon_pidfile} ] ; then
        pid=`cat ${daemon_pidfile}`
        kill -TERM $pid
        rm -f ${daemon_pidfile}
        rm -f /var/lock/subsys/${DAEMON_NAME}
        echo "OK"
    else
        echo "FAIL"
    fi
#   fi
    ;;

  restart)
@@ -36,7 +36,7 @@ do
        # be an empty string.
        if [ -n "$ADDR" ]
        then
            echo LOG "registry.local resolved IPv4, continuing with docker login"
            LOG "registry.local resolved IPv4, continuing with docker login"
            break
        fi
    fi

@@ -50,7 +50,7 @@ do
        # be an empty string.
        if [ -n "$ADDR" ]
        then
            echo LOG "registry.local resolved IPv6, continuing with docker login"
            LOG "registry.local resolved IPv6, continuing with docker login"
            break
        fi
    fi

@@ -59,9 +59,12 @@ do
done

docker login --password-stdin -u ${DOCKER_USERNAME} registry.local:9001 <<< ${DOCKER_PASSWORD}
res=$(docker login --password-stdin -u ${DOCKER_USERNAME} registry.local:9001 2>&1 <<< ${DOCKER_PASSWORD})
rc=$?
if [ $? -eq 0 ]
if [ ${rc} -eq 0 ]
then
    LOG "docker login to registry.local completed successfully"
    touch /var/run/docker_login_done
else
    LOG "docker login error ${rc} ${res}"
fi
@@ -26,8 +26,8 @@ Requires: python3-kubernetes
Requires: python3-netaddr
Requires: python3-paste
Requires: python3-pbr
Requires: python3-psutil
Requires: python3-pyudev
Requires: python3-psutil
Requires: python3-requests
Requires: python3-retrying
Requires: python3-sqlalchemy

@@ -36,13 +36,13 @@ Requires: python3-webob
Requires: python3-webtest
Requires: python3-wsme
Requires: python3-six
Requires: python3-django
Requires: python3-mox3
Requires: python3-rfc3986
Requires: python3-oslo-i18n
Requires: python3-oslo-config
Requires: python3-oslo-concurrency
Requires: python3-oslo-db
Requires: python3-oslo-log
Requires: python3-oslo-rootwrap
Requires: python3-oslo-serialization
Requires: python3-oslo-service
Requires: python3-oslo-utils

@@ -116,6 +116,7 @@ install -p -D -m 755 scripts/validate-platform-backup.sh %{buildroot}%{local_bin
install -p -D -m 755 scripts/manage-partitions %{buildroot}%{local_bindir}/manage-partitions
install -p -D -m 755 scripts/query_pci_id %{buildroot}%{local_bindir}/query_pci_id
install -p -D -m 700 scripts/kube-cert-rotation.sh %{buildroot}%{local_bindir}/kube-cert-rotation.sh
install -p -D -m 755 scripts/ceph_k8s_update_monitors.sh %{buildroot}%{local_bindir}/ceph_k8s_update_monitors.sh

%clean
echo "CLEAN CALLED"
@@ -16,9 +16,9 @@ Architecture: all
Depends: ${misc:Depends},
         ${python:Depends},
         python-boto3,
         python-django,
         python-docker,
         python-parted,
         python-rfc3986,
         python-six,
         python-pyudev,
         python-pbr,

@@ -48,7 +48,6 @@ Depends: ${misc:Depends},
         python-retrying,
         python-sqlalchemy,
         python-stevedore,
         python-mox3,
         python-pytest,
         python-testtools,
         tsconfig
@@ -30,6 +30,7 @@ Requires: python2-keystonemiddleware
Requires: python2-kubernetes
Requires: python2-netaddr
Requires: python2-paste
Requires: python2-rfc3986
Requires: python2-pyudev
Requires: python2-pbr
Requires: python2-psutil

@@ -41,12 +42,12 @@ Requires: python2-WSME
Requires: python2-six
Requires: python2-sqlalchemy
Requires: python2-stevedore
Requires: python2-mox3
Requires: python2-oslo.i18n
Requires: python2-oslo.config
Requires: python2-oslo.concurrency
Requires: python2-oslo.db
Requires: python2-oslo.log
Requires: python2-oslo.rootwrap
Requires: python2-oslo.serialization
Requires: python2-oslo.service
Requires: python2-oslo.utils
@@ -163,6 +163,7 @@
        </xs:sequence>
        <xs:attribute name="virtualFunctions" type="xs:nonNegativeInteger" use="required" />
        <xs:attribute name="virtualFunctionDriver" type="xs:string" />
        <xs:attribute name="maxTxRate" type="xs:nonNegativeInteger" />
    </xs:complexType>

    <xs:complexType name="route">
@@ -10,10 +10,8 @@ module=patch_tox_venv
module=periodic_task
module=policy
module=redhat-eventlet.patch
module=rootwrap
module=rpc
module=setup
module=version

# The base module to hold the copy of openstack.common
base=sysinv
@@ -83,16 +83,13 @@ extension-pkg-whitelist=lxml.etree,greenlet
# W1505: deprecated-method
# W1509: subprocess-popen-preexec-fn
# All these errors should be fixed:
# E0633: unpacking-non-sequence
# E0701: bad-except-order
# E1120: no-value-for-parameter
# E1101: no-member
# E1111: assignment-from-no-return
disable=C, R, fixme, W0101, W0105, W0106, W0107, W0108, W0110, W0123, W0150,
        W0201, W0211, W0212, W0221, W0223, W0231, W0235, W0311, W0402, W0403,
        W0404, W0603, W0612, W0613, W0621, W0622, W0631, W0632, W0701, W0703,
        W0706, W1113, W1201, W1401, W1505, W1509,
        E0633, E0701, E1120, E1101, E1111
        E1101, E1111

[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
@@ -1,7 +1,6 @@
pbr>=0.5
SQLAlchemy
amqplib>=0.6.1
argparse
boto3
botocore
cryptography!=2.0  # BSD/Apache-2.0

@@ -19,8 +18,10 @@ iso8601>=0.1.4
oslo.i18n  # Apache-2.0
oslo.config>=3.7.0  # Apache-2.0
oslo.concurrency>=3.7.1  # Apache-2.0
oslo.log  # Apache-2.0
oslo.db>=4.1.0  # Apache-2.0
oslo.messaging!=9.0.0  # Apache-2.0
oslo.rootwrap>=5.8.0  # Apache-2.0
oslo.service>=1.10.0  # Apache-2.0
oslo.utils>=3.5.0  # Apache-2.0
oslo.serialization>=1.10.0,!=2.19.1  # Apache-2.0

@@ -44,4 +45,9 @@ rpm
ruamel.yaml>=0.13.14  # MIT
docker  # Apache-2.0
kubernetes  # Apache-2.0
Django
pyudev
migrate
python-ldap>=3.1.0
psycopg2-binary
python-barbicanclient
rfc3986
@@ -0,0 +1,129 @@
#!/bin/bash
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# Utility for patching Kubernetes Persistent Volumes during
# AIO-SX to AIO-DX migration.
#
# This is required because the Ceph-mon IP address changes
# from controller-0 to the floating controller IP. Therefore,
# existing PV claims backed by cephfs or RBD will fail to
# mount because the previous monitor is inaccessible.

# Logging info.
NAME=$(basename $0)

# This will log to /var/log/platform.log
# and stdout
function log {
    logger -p local1.info "$NAME: $1"
    echo "$1"
}

function help {
    echo "Utility for patching Kubernetes Persistent Volumes during AIO-SX to AIO-DX migration"
    echo
    echo "Syntax: $NAME [-h] CONTROLLER_0_MGMT_IP FLOATING_CONTROLLER_MGMT_IP"
    echo "options:"
    echo "h     Prints this Help."
    echo
}

while getopts ":h" option; do
    case $option in
        h)
            help
            exit;;
        \?)
            log "Error: Invalid option"
            exit;;
    esac
done

if [ $# -ne 2 ]; then
    log "Error: Wrong number of arguments"
    log "Run $NAME -h for help"
    exit 1
fi

# read input arguments
CONTROLLER_0_IP=$1
CONTROLLER_FLOATING_IP=$2

function check_pv_need_migration {
    local mon
    mon=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get PersistentVolume $1 -o jsonpath='{.spec.*.monitors}')
    echo $mon | grep -q $CONTROLLER_0_IP
}

ITER=0
MAX_ITER=5
while [[ $ITER -le $MAX_ITER ]]; do
    kubectl --kubeconfig=/etc/kubernetes/admin.conf get StorageClass --all-namespaces > /dev/null
    if [ $? -ne 0 ]; then
        log "kubernetes api is not available. Retry ${ITER} of ${MAX_ITER}"
        ITER=$((ITER + 1))
        sleep 30
    else
        break
    fi
done

if [[ $ITER -gt $MAX_ITER ]]; then
    log "kubernetes api is not available. Exiting with failure"
    exit 1
fi

STORAGE_CLASSES=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get StorageClass --all-namespaces | \
                  grep -E "ceph.com/cephfs|ceph.com/rbd" | awk '{print $1}')
EXISTING_PVCS=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get PersistentVolume --all-namespaces --no-headers | awk '{print $1}')

for PVC in $EXISTING_PVCS; do
    PVC_SC=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get PersistentVolume $PVC -o json | \
             grep -Eo '"storageClassName"[^,]*' | awk '{print $2}' | sed 's/"//g')

    for SC in ${STORAGE_CLASSES}
    do
        if [ "$SC" == "$PVC_SC" ]; then
            # Loop over the existing Persistent Volumes and replace each one, changing the
            # Ceph monitor IP address. This is required because kubernetes does not allow
            # updating the monitor IP in place, so the PV has to be re-created. The replace
            # command blocks on the pv-protection finalizer, waiting for the bound PVC to be
            # removed, but we want to replace the PV without removing the bound PVC. Therefore,
            # the replace command runs in the background while a patch removes the
            # pv-protection finalizer so that the replace command completes.
            check_pv_need_migration $PVC
            if [ $? -ne 0 ]; then
                log "skipping PersistentVolume/${PVC} - already patched"
                continue
            fi

            log "Started patching PersistentVolume/${PVC}"
            kubectl --kubeconfig=/etc/kubernetes/admin.conf get PersistentVolume $PVC -o yaml | sed "s/$CONTROLLER_0_IP/$CONTROLLER_FLOATING_IP/g" | \
                kubectl --kubeconfig=/etc/kubernetes/admin.conf replace --cascade=false --force -f - >/dev/null &
            sleep 1
            TIMEOUT=4
            DELAY=0
            while [[ $DELAY -lt $TIMEOUT ]]; do
                timestamp=$(kubectl --kubeconfig=/etc/kubernetes/admin.conf get PersistentVolume $PVC -o jsonpath='{.metadata.deletionTimestamp}')
                if [ ! -z "${timestamp}" ]; then
                    break
                else
                    sleep 1
                    DELAY=$((DELAY + 1))
                fi
            done

            if [[ $DELAY -lt $TIMEOUT ]]; then
                kubectl --kubeconfig=/etc/kubernetes/admin.conf patch PersistentVolume ${PVC} -p '{"metadata":{"finalizers":null}}' --type=merge
                wait
                log "PersistentVolume/${PVC} replaced"
            else
                log "Timed out waiting to patch PersistentVolume/${PVC}"
                exit 1
            fi
        fi
    done
done

exit 0
@@ -1,6 +1,6 @@
#!/bin/sh
#
# Copyright (c) 2013-2014, 2016 Wind River Systems, Inc.
# Copyright (c) 2013-2014, 2016, 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@@ -37,6 +37,8 @@
process="sysinv"
service="-api"
binname="${process}${service}"
readonly max_sysinv_api_request_attempts=15
readonly sysinv_api_request_sleep=1

#######################################################################

@@ -47,6 +49,7 @@ OCF_RESKEY_user_default="sysinv"
OCF_RESKEY_pid_default="/var/run/${binname}.pid"
OCF_RESKEY_config_default="/etc/sysinv/sysinv.conf"
OCF_RESKEY_client_binary_default="system"
OCF_RESKEY_os_tenant_name_default="admin"

: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
: ${OCF_RESKEY_dbg=${OCF_RESKEY_dbg_default}}

@@ -54,6 +57,7 @@ OCF_RESKEY_client_binary_default="system"
: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}}
: ${OCF_RESKEY_client_binary=${OCF_RESKEY_client_binary_default}}
: ${OCF_RESKEY_os_tenant_name=${OCF_RESKEY_os_tenant_name_default}}

mydaemon="/usr/bin/${OCF_RESKEY_binary}"
TMP_DIR=/var/run/sysinv_tmp
@@ -208,6 +212,52 @@ sysinv_api_status() {
    fi
}

sysinv_api_request () {
    # Monitor the RA by retrieving the system show
    if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_os_auth_url" ]; then
        ocf_run -q $OCF_RESKEY_client_binary \
            --os-username "$OCF_RESKEY_os_username" \
            --os-project-name "$OCF_RESKEY_os_tenant_name" \
            --os-auth-url "$OCF_RESKEY_os_auth_url" \
            --os-region-name "$OCF_RESKEY_os_region_name" \
            --system-url "$OCF_RESKEY_system_url" \
            show > /dev/null 2>&1
        rc=$?
        if [ $rc -ne 0 ]; then
            ocf_log err "Failed to connect to the System Inventory Service (sysinv-api): $rc"
            return $OCF_ERR_GENERIC
        else
            return $OCF_SUCCESS
        fi
    else
        ocf_log err "Unable to run system show, trying direct request on sysinv-api URL (sysinv-api)"
        # Test request on "http://controller:6385/v1" if minimum variables are not available
        # "controller" matches the mgmt ip on /etc/hosts
        ocf_run curl http://controller:6385/v1 > /dev/null 2>&1
        rc=$?
        if [ $rc -eq 0 ]; then
            return $OCF_SUCCESS
        fi
        ocf_log err "Unable to communicate with the System Inventory Service (sysinv-api)"
        return $OCF_ERR_GENERIC
    fi
}

sysinv_api_request_with_attempt() {
    for (( i = 1; i <= $max_sysinv_api_request_attempts; i++ ))
    do
        sysinv_api_request
        rc=$?
        if [ $rc -ne ${OCF_SUCCESS} ]; then
            ocf_log info "Retrying to connect to the System Inventory Service (sysinv-api), attempt #$i"
        else
            break
        fi
        sleep $sysinv_api_request_sleep
    done
    return ${rc}
}

sysinv_api_monitor () {
    local rc
    proc="${binname}:monitor"

@@ -223,25 +273,15 @@ sysinv_api_monitor () {
        return $rc
    fi

    # Monitor the RA by retrieving the system show
    if [ -n "$OCF_RESKEY_os_username" ] && [ -n "$OCF_RESKEY_os_tenant_name" ] && [ -n "$OCF_RESKEY_os_auth_url" ]; then
        ocf_run -q $OCF_RESKEY_client_binary \
            --os_username "$OCF_RESKEY_os_username" \
            --os_project_name "$OCF_RESKEY_os_tenant_name" \
            --os_auth_url "$OCF_RESKEY_os_auth_url" \
            --os_region_name "$OCF_RESKEY_os_region_name" \
            --system_url "$OCF_RESKEY_system_url" \
            show > /dev/null 2>&1
        rc=$?
        if [ $rc -ne 0 ]; then
            ocf_log err "Failed to connect to the System Inventory Service (sysinv-api): $rc"
            return $OCF_NOT_RUNNING
        fi
    fi
    # Trigger a request over sysinv-api to determine if it is properly started
    sysinv_api_request
    rc=$?
    if [ $rc -ne ${OCF_SUCCESS} ] ; then
        ocf_log err "System Inventory Service (sysinv-api) monitor failed"
    else
        ocf_log debug "System Inventory Service (sysinv-api) monitor succeeded"
    fi

    ocf_log debug "System Inventory Service (sysinv-api) monitor succeeded"

    return $OCF_SUCCESS
    return ${rc}
}

sysinv_api_start () {

@@ -252,7 +292,8 @@ sysinv_api_start () {
        ocf_log info "${proc}"
    fi

    # If running then issue a ping test
    # If running then issue a ping test and check sysinv-api availability
    # Retry to connect to it in case of failure
    if [ -f ${OCF_RESKEY_pid} ] ; then
        sysinv_api_status
        rc=$?

@@ -260,7 +301,9 @@ sysinv_api_start () {
        ocf_log err "${proc} ping test failed (rc=${rc})"
        sysinv_api_stop
    else
        return ${OCF_SUCCESS}
        sysinv_api_request_with_attempt
        rc=$?
        return ${rc}
    fi
    fi

@@ -285,12 +328,26 @@ sysinv_api_start () {
        fi
    fi

    # If running then issue a ping test and check sysinv-api availability
    # Retry to connect to it in case of failure
    if [ -f ${OCF_RESKEY_pid} ] ; then
        sysinv_api_status
        rc=$?
        if [ $rc -ne ${OCF_SUCCESS} ] ; then
            ocf_log info "${proc} ping test failed (rc=${rc})"
            sysinv_api_stop
        else
            sysinv_api_request_with_attempt
            rc=$?
        fi
    fi

    # Record success or failure and return status
    if [ ${rc} -eq $OCF_SUCCESS ] ; then
        ocf_log info "Inventory Service (${OCF_RESKEY_binary}) started (pid=${pid})"
    else
        ocf_log err "Inventory Service (${OCF_RESKEY_binary}) failed to start (rc=${rc})"
        rc=${OCF_NOT_RUNNING}
        rc=${OCF_ERR_GENERIC}
    fi

    return ${rc}
@@ -32,13 +32,14 @@ console_scripts =
    sysinv-fpga-agent = sysinv.cmd.fpga_agent:main
    sysinv-dbsync = sysinv.cmd.dbsync:main
    sysinv-conductor = sysinv.cmd.conductor:main
    sysinv-rootwrap = sysinv.openstack.common.rootwrap.cmd:main
    sysinv-rootwrap = oslo_rootwrap.cmd:main
    sysinv-dnsmasq-lease-update = sysinv.cmd.dnsmasq_lease_update:main
    sysinv-upgrade = sysinv.cmd.upgrade:main
    sysinv-puppet = sysinv.cmd.puppet:main
    sysinv-helm = sysinv.cmd.helm:main
    sysinv-utils = sysinv.cmd.utils:main
    cert-mon = sysinv.cmd.cert_mon:main
    sysinv-reset-n3000-fpgas = sysinv.cmd.reset_n3000_fpgas:main

systemconfig.puppet_plugins =
    001_platform = sysinv.puppet.platform:PlatformPuppet

@@ -65,11 +66,16 @@ systemconfig.puppet_plugins =
    035_dockerdistribution = sysinv.puppet.dockerdistribution:DockerDistributionPuppet
    036_pciirqaffinity = sysinv.puppet.pci_irq_affinity:PciIrqAffinityPuppet
    038_certmon = sysinv.puppet.certmon:CertMonPuppet
    039_helm = sysinv.puppet.helm:HelmPuppet
    040_rook = sysinv.puppet.rook:RookPuppet
    099_service_parameter = sysinv.puppet.service_parameter:ServiceParamPuppet

systemconfig.armada.manifest_ops =
    generic = sysinv.helm.manifest_generic:GenericArmadaManifestOperator

systemconfig.app_lifecycle =
    generic = sysinv.helm.lifecycle_generic:GenericAppLifecycleOperator

sysinv.agent.lldp.drivers =
    lldpd = sysinv.agent.lldp.drivers.lldpd.driver:SysinvLldpdAgentDriver
    ovs = sysinv.agent.lldp.drivers.ovs.driver:SysinvOVSAgentDriver
@@ -1,5 +1,5 @@
#
# Copyright (c) 2018-2020 Wind River Systems, Inc.
# Copyright (c) 2018-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -141,11 +141,13 @@ class DiskOperator(object):

        return avail_space_mib

    def disk_format_gpt(self, host_uuid, idisk_dict, is_cinder_device):
    def disk_prepare(self, host_uuid, idisk_dict,
                     skip_format, is_cinder_device):
        disk_node = idisk_dict.get('device_path')

        disk_utils.disk_wipe(disk_node)
        utils.execute('parted', disk_node, 'mklabel', 'gpt')
        if not skip_format:
            utils.execute('parted', disk_node, 'mklabel', 'gpt')

        if is_cinder_device:
            LOG.debug("Removing .node_cinder_lvm_config_complete_file")
@@ -53,6 +53,58 @@ class LVGOperator(object):

        return thinpools

    def ilvg_rook_get(self):
        # rook-ceph VGs are hidden by the global_filter, so list them separately.
        # keys: matching the field order of the vgdisplay command
        string_keys = ['lvm_vg_name', 'lvm_vg_uuid', 'lvm_vg_access',
                       'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv',
                       'lvm_cur_pv', 'lvm_vg_size', 'lvm_vg_total_pe',
                       'lvm_vg_free_pe']

        # keys that need to be translated into ints
        int_keys = ['lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv',
                    'lvm_cur_pv', 'lvm_vg_size', 'lvm_vg_total_pe',
                    'lvm_vg_free_pe']

        # vgdisplay command to retrieve the vg data of all vgs present
        vgdisplay_command = 'vgdisplay -C --aligned -o vg_name,vg_uuid,vg_attr'\
                            ',max_lv,lv_count,max_pv,pv_count,vg_size,'\
                            'vg_extent_count,vg_free_count'\
                            ' --units B --nosuffix --noheadings'

        disable_filter = ' --config \'devices/global_filter=["a|.*|"]\''
        vgdisplay_command = vgdisplay_command + disable_filter

        try:
            vgdisplay_process = subprocess.Popen(vgdisplay_command,
                                                 stdout=subprocess.PIPE,
                                                 shell=True)
            vgdisplay_output = vgdisplay_process.stdout.read().decode("utf-8")
        except Exception as e:
            self.handle_exception("Could not retrieve vgdisplay "
                                  "information: %s" % e)
            vgdisplay_output = ""

        # parse the output 1 vg/row
        rook_vgs = []
        for row in vgdisplay_output.split('\n'):
            if row.strip().startswith("ceph"):

                # get the values of fields as strings
                values = row.split()

                # create the dict of attributes
                attr = dict(zip(string_keys, values))

                # convert required values from strings to ints
                for k in int_keys:
                    if k in attr.keys():
                        attr[k] = int(attr[k])

                rook_vgs.append(attr)

        return rook_vgs

    def ilvg_get(self, cinder_device=None):
        '''Enumerate physical volume topology based on:

@@ -127,6 +179,11 @@ class LVGOperator(object):
        if attr:
            ilvg.append(attr)

        rook_vgs = self.ilvg_rook_get()
        for vg in rook_vgs:
            if vg and vg not in ilvg:
                ilvg.append(vg)

        LOG.debug("ilvg= %s" % ilvg)

        return ilvg
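To make the parsing above concrete, here is roughly what one vgdisplay row for a rook VG looks like and how it becomes an attribute dict. The sample row is fabricated for illustration; real names, UUIDs and sizes will differ.

# Hypothetical vgdisplay -C row for a rook-ceph volume group.
row = 'ceph-block-dbs-abc123 xYzUuid1 wz--n- 0 1 0 1 9663676416 2303 2303'
string_keys = ['lvm_vg_name', 'lvm_vg_uuid', 'lvm_vg_access',
               'lvm_max_lv', 'lvm_cur_lv', 'lvm_max_pv',
               'lvm_cur_pv', 'lvm_vg_size', 'lvm_vg_total_pe',
               'lvm_vg_free_pe']
attr = dict(zip(string_keys, row.split()))
# After the int conversion loop: attr['lvm_vg_size'] == 9663676416, etc.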
@@ -34,9 +34,7 @@ Commands (from conductors) are received via RPC calls.
"""

from __future__ import print_function
import errno
from eventlet.green import subprocess
import fcntl
import fileinput
import os
import retrying

@@ -94,8 +92,6 @@ CONF.register_opts(agent_opts, 'agent')
MAXSLEEP = 300  # 5 minutes

SYSINV_READY_FLAG = os.path.join(tsc.VOLATILE_PATH, ".sysinv_ready")
SYSINV_FIRST_REPORT_FLAG = os.path.join(tsc.VOLATILE_PATH,
                                        ".sysinv_agent_report_sent")

CONFIG_APPLIED_FILE = os.path.join(tsc.PLATFORM_CONF_PATH, ".config_applied")
CONFIG_APPLIED_DEFAULT = "install"

@@ -104,6 +100,7 @@ FIRST_BOOT_FLAG = os.path.join(
    tsc.PLATFORM_CONF_PATH, ".first_boot")

PUPPET_HIERADATA_PATH = os.path.join(tsc.PUPPET_PATH, 'hieradata')
PUPPET_HIERADATA_CACHE_PATH = '/etc/puppet/cache/hieradata'

LOCK_AGENT_ACTION = 'agent-exclusive-action'
@@ -208,6 +205,10 @@ class AgentManager(service.PeriodicService):
        initial_reports_required = \
            self.INVENTORY_REPORTS_REQUIRED - self._inventory_reported
        initial_reports_required.discard(self.HOST_FILESYSTEMS)

        if self._inventory_reported:
            utils.touch(constants.SYSINV_REPORTED)

        if initial_reports_required:
            LOG.info("_report_to_conductor initial_reports_required=%s" %
                     initial_reports_required)

@@ -217,7 +218,7 @@ class AgentManager(service.PeriodicService):

    def _report_to_conductor_iplatform_avail(self):
        # First report sent to conductor since boot
        utils.touch(SYSINV_FIRST_REPORT_FLAG)
        utils.touch(constants.SYSINV_FIRST_REPORT_FLAG)
        # Sysinv-agent ready; used also by the init script.
        utils.touch(SYSINV_READY_FLAG)
        time.sleep(1)  # give time for conductor to process

@@ -552,7 +553,7 @@ class AgentManager(service.PeriodicService):

        # Is this the first time since boot we are reporting to conductor?
        msg_dict.update({constants.SYSINV_AGENT_FIRST_REPORT:
                         not os.path.exists(SYSINV_FIRST_REPORT_FLAG)})
                         not os.path.exists(constants.SYSINV_FIRST_REPORT_FLAG)})

        try:
            rpcapi.iplatform_update_by_ihost(context,

@@ -580,30 +581,12 @@ class AgentManager(service.PeriodicService):
        """
        lock_file_fd = os.open(
            constants.NETWORK_CONFIG_LOCK_FILE, os.O_CREAT | os.O_RDONLY)
        count = 1
        delay = 5
        max_count = 5
        while count <= max_count:
            try:
                fcntl.flock(lock_file_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return lock_file_fd
            except IOError as e:
                # raise on unrelated IOErrors
                if e.errno != errno.EAGAIN:
                    raise
                else:
                    LOG.info("Could not acquire lock({}): {} ({}/{}), "
                             "will retry".format(lock_file_fd, str(e),
                                                 count, max_count))
                    time.sleep(delay)
                    count += 1
        LOG.error("Failed to acquire lock (fd={})".format(lock_file_fd))
        return 0
        return utils.acquire_exclusive_nb_flock(lock_file_fd)

    def _release_network_config_lock(self, lockfd):
        """ Release the lock guarding apply_network_config.sh """
        if lockfd:
            fcntl.flock(lockfd, fcntl.LOCK_UN)
            utils.release_flock(lockfd)
            os.close(lockfd)

    def _get_ports_inventory(self):

@@ -711,8 +694,8 @@ class AgentManager(service.PeriodicService):
        return port_list, pci_device_list, host_macs

    def _retry_on_missing_host_uuid(ex):  # pylint: disable=no-self-argument
        LOG.info('Caught missing host_uuid exception. Retrying... '
                 'Exception: {}'.format(ex))
        LOG.info('Caught exception missing host. '
                 'Retrying... Exception: {}'.format(ex))
        return isinstance(ex, exception.LocalHostUUIDNotFound)

    @retrying.retry(wait_fixed=15 * 1000, stop_max_delay=300 * 1000,
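The retry loop removed here moves behind `utils.acquire_exclusive_nb_flock`. Based purely on the deleted inline code, the helper presumably looks something like the sketch below; this is an inference, not the actual utils implementation.

import errno
import fcntl
import time


def acquire_exclusive_nb_flock(lock_file_fd, max_count=5, delay=5):
    """Try a non-blocking exclusive flock, retrying on EAGAIN.

    Returns the fd on success, 0 on failure (mirroring the old inline code).
    """
    for count in range(1, max_count + 1):
        try:
            fcntl.flock(lock_file_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return lock_file_fd
        except IOError as e:
            if e.errno != errno.EAGAIN:   # raise on unrelated IOErrors
                raise
            time.sleep(delay)             # lock busy; wait and retry
    return 0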
@@ -1129,39 +1112,43 @@
        disk_size = utils.get_disk_capacity_mib(self._ihost_rootfs_device)
        disk_size = int(disk_size / 1024)

        if disk_size > constants.DEFAULT_SMALL_DISK_SIZE:
            LOG.info("Disk size for %s: %s ... large disk defaults" %
                     (self._ihost_rootfs_device, disk_size))
        if self._ihost_personality == constants.CONTROLLER:
            if disk_size > constants.DEFAULT_SMALL_DISK_SIZE:
                LOG.info("Disk size for %s: %s ... large disk defaults" %
                         (self._ihost_rootfs_device, disk_size))

            backup_lv_size = \
                constants.DEFAULT_DATABASE_STOR_SIZE + \
                constants.DEFAULT_PLATFORM_STOR_SIZE + \
                constants.BACKUP_OVERHEAD
                backup_lv_size = \
                    constants.DEFAULT_DATABASE_STOR_SIZE + \
                    constants.DEFAULT_PLATFORM_STOR_SIZE + \
                    constants.BACKUP_OVERHEAD

        elif disk_size >= constants.MINIMUM_SMALL_DISK_SIZE:
            LOG.info("Disk size for %s : %s ... small disk defaults" %
                     (self._ihost_rootfs_device, disk_size))
            elif disk_size >= constants.MINIMUM_SMALL_DISK_SIZE:
                LOG.info("Disk size for %s : %s ... small disk defaults" %
                         (self._ihost_rootfs_device, disk_size))

            # Due to the small size of the disk we can't provide the
            # proper amount of backup space which is (database + platform_lv
            # + BACKUP_OVERHEAD) so we are using a smaller default.
            backup_lv_size = constants.DEFAULT_SMALL_BACKUP_STOR_SIZE
                # Due to the small size of the disk we can't provide the
                # proper amount of backup space which is (database +
                # platform_lv + BACKUP_OVERHEAD) so we are using a smaller
                # default.
                backup_lv_size = constants.DEFAULT_SMALL_BACKUP_STOR_SIZE

        elif (disk_size >= constants.MINIMUM_TINY_DISK_SIZE and
                rpcapi.is_virtual_system_config(icontext) and
                tsc.system_type == constants.TIS_AIO_BUILD):
            # Supports StarlingX running in QEMU/KVM VM with a tiny disk (AIO only)
            LOG.info("Disk size for %s : %s ... tiny disk defaults "
                     "for virtual system configuration" %
                     (self._ihost_rootfs_device, disk_size))
            kubelet_lv_size = constants.TINY_KUBELET_STOR_SIZE
            docker_lv_size = constants.TINY_KUBERNETES_DOCKER_STOR_SIZE
            backup_lv_size = constants.DEFAULT_TINY_BACKUP_STOR_SIZE
            elif (disk_size >= constants.MINIMUM_TINY_DISK_SIZE and
                    rpcapi.is_virtual_system_config(icontext) and
                    tsc.system_type == constants.TIS_AIO_BUILD):
                # Supports StarlingX running in QEMU/KVM VM with a tiny
                # disk (AIO only)
                LOG.info("Disk size for %s : %s ... tiny disk defaults "
                         "for virtual system configuration" %
                         (self._ihost_rootfs_device, disk_size))
                kubelet_lv_size = constants.TINY_KUBELET_STOR_SIZE
                docker_lv_size = constants.TINY_KUBERNETES_DOCKER_STOR_SIZE
                backup_lv_size = constants.DEFAULT_TINY_BACKUP_STOR_SIZE

        else:
            LOG.info("Disk size for %s : %s ... disk too small" %
                     (self._ihost_rootfs_device, disk_size))
            raise exception.SysinvException("Disk size requirements not met.")
            else:
                LOG.info("Disk size for %s : %s ... disk too small" %
                         (self._ihost_rootfs_device, disk_size))
                raise exception.SysinvException(
                    "Disk size requirements not met.")

        # check if the scratch fs is supported for current host
        if utils.is_filesystem_supported(constants.FILESYSTEM_NAME_SCRATCH,
@@ -1405,22 +1392,6 @@ class AgentManager(service.PeriodicService):
        if self._ihost_personality != constants.STORAGE:
            self._update_disk_partitions(rpcapi, icontext, self._ihost_uuid)

        # Update physical volumes
        ipv = self._ipv_operator.ipv_get(cinder_device=cinder_device)
        if ((self._prev_pv is None) or
                (self._prev_pv != ipv)):
            self._prev_pv = ipv
            try:
                rpcapi.ipv_update_by_ihost(icontext,
                                           self._ihost_uuid,
                                           ipv)
                self._inventory_reported.add(self.PV)
            except exception.SysinvException:
                LOG.exception("Sysinv Agent exception updating ipv "
                              "conductor.")
                self._prev_pv = None
                pass

        # Update local volume groups
        ilvg = self._ilvg_operator.ilvg_get(cinder_device=cinder_device)
        if ((self._prev_lvg is None) or

@@ -1437,6 +1408,22 @@ class AgentManager(service.PeriodicService):
                self._prev_lvg = None
                pass

        # Update physical volumes
        ipv = self._ipv_operator.ipv_get(cinder_device=cinder_device)
        if ((self._prev_pv is None) or
                (self._prev_pv != ipv)):
            self._prev_pv = ipv
            try:
                rpcapi.ipv_update_by_ihost(icontext,
                                           self._ihost_uuid,
                                           ipv)
                self._inventory_reported.add(self.PV)
            except exception.SysinvException:
                LOG.exception("Sysinv Agent exception updating ipv "
                              "conductor.")
                self._prev_pv = None
                pass

        self._create_host_filesystems(rpcapi, icontext)

        # Notify conductor of inventory completion after necessary
@@ -1518,7 +1505,8 @@ class AgentManager(service.PeriodicService):
        tsc.install_uuid = install_uuid

    def _retry_on_personality_is_none(ex):  # pylint: disable=no-self-argument
        LOG.info('Caught exception. Retrying... Exception: {}'.format(ex))
        LOG.info('Caught exception _retry_on_personality_is_none '
                 'Retrying ... Exception: {}'.format(ex))
        return isinstance(ex, exception.LocalManagementPersonalityNotFound)

    @retrying.retry(wait_fixed=10 * 1000, stop_max_delay=300 * 1000,

@@ -1573,20 +1561,22 @@ class AgentManager(service.PeriodicService):
        if not os.path.isfile(file_name_sysinv):
            shutil.copy2(file_name, file_name_sysinv)

        # Remove resolv.conf file. It may have been created as a
        # symlink by the volatile configuration scripts.
        subprocess.call(["rm", "-f", file_name])  # pylint: disable=not-callable

        if isinstance(file_content, dict):
            f_content = file_content.get(file_name)
        else:
            f_content = file_content

        os.umask(0)
        if f_content is not None:
            with os.fdopen(os.open(file_name, os.O_CREAT | os.O_WRONLY,
                                   permissions), 'wb') as f:
            # create a temporary file to hold the runtime configuration values
            dirname = os.path.dirname(file_name)
            basename = os.path.basename(file_name)
            fd, tmppath = tempfile.mkstemp(dir=dirname, prefix=basename)
            with os.fdopen(fd, 'wb') as f:
                f.write(f_content)
            if os.path.islink(file_name):
                os.unlink(file_name)
            os.rename(tmppath, file_name)
            os.chmod(file_name, permissions)

        self._update_config_applied(iconfig_uuid)
        self._report_config_applied(context)
@@ -1600,9 +1590,27 @@ class AgentManager(service.PeriodicService):
        LOG.error("report_inventory unknown request=%s" % inventory_update)

    def _retry_on_missing_inventory_info(ex):  # pylint: disable=no-self-argument
        LOG.info('Caught exception. Retrying... Exception: {}'.format(ex))
        LOG.info('Caught exception _retry_on_missing_inventory_info. '
                 'Retrying... Exception: {}'.format(ex))
        return isinstance(ex, exception.AgentInventoryInfoNotFound)

    @staticmethod
    def _update_local_puppet_cache(hieradata_path):
        cache_dir = PUPPET_HIERADATA_CACHE_PATH
        cache_dir_temp = cache_dir + '.temp'
        try:
            if os.path.isdir(cache_dir_temp):
                shutil.rmtree(cache_dir_temp)
            shutil.copytree(hieradata_path, cache_dir_temp)
            subprocess.check_call(['sync'])  # pylint: disable=not-callable

            if os.path.isdir(cache_dir):
                shutil.rmtree(cache_dir)
            os.rename(cache_dir_temp, cache_dir)
        except Exception:
            LOG.exception("Failed to update local puppet cache.")
            raise

    @retrying.retry(wait_fixed=15 * 1000, stop_max_delay=300 * 1000,
                    retry_on_exception=_retry_on_missing_inventory_info)
    @utils.synchronized(LOCK_AGENT_ACTION, external=False)
@@ -1773,6 +1781,8 @@ class AgentManager(service.PeriodicService):
        finally:
            os.close(fd)
            os.remove(tmpfile)
            # Update local puppet cache anyway to be consistent.
            self._update_local_puppet_cache(hieradata_path)

    def configure_ttys_dcd(self, context, uuid, ttys_dcd):
        """Configure the getty on the serial device.
@@ -2033,20 +2043,22 @@ class AgentManager(service.PeriodicService):

        return iscsi_initiator_name

    def disk_format_gpt(self, context, host_uuid, idisk_dict,
                        is_cinder_device):
        """GPT format a disk
    def disk_prepare(self, context, host_uuid, idisk_dict,
                     skip_format, is_cinder_device):
        """Prepare a disk for system use.

        :param context: an admin context
        :param host_uuid: ihost uuid unique id
        :param idisk_dict: values for idisk volume object
        :param skip_format: bool value tells if the idisk should be GPT formatted
        :param is_cinder_device: bool value tells if the idisk is for cinder
        """
        LOG.debug("AgentManager.format_disk_gpt: %s" % idisk_dict)
        LOG.debug("AgentManager.disk_prepare: %s" % idisk_dict)
        if self._ihost_uuid and self._ihost_uuid == host_uuid:
            self._idisk_operator.disk_format_gpt(host_uuid,
                                                 idisk_dict,
                                                 is_cinder_device)
            self._idisk_operator.disk_prepare(host_uuid,
                                              idisk_dict,
                                              skip_format,
                                              is_cinder_device)

    def update_host_memory(self, context, host_uuid):
        """update the host memory
@@ -2081,3 +2093,42 @@ class AgentManager(service.PeriodicService):
        except subprocess.CalledProcessError:
            # Just log an error. Don't stop any callers from further execution.
            LOG.warn("Failed to update helm repo data for user sysadmin.")

    def update_host_lvm(self, context, host_uuid):
        if self._ihost_uuid and self._ihost_uuid == host_uuid:
            rpcapi = conductor_rpcapi.ConductorAPI(
                topic=conductor_rpcapi.MANAGER_TOPIC)

            ipartition = self._ipartition_operator.ipartition_get(skip_gpt_check=True)
            try:
                rpcapi.ipartition_update_by_ihost(
                    context, self._ihost_uuid, ipartition)
            except AttributeError:
                # safe to ignore during upgrades
                LOG.warn("Skip updating ipartition rook conductor. "
                         "Upgrade in progress?")
            except exception.SysinvException:
                LOG.exception("Sysinv Agent exception updating rook "
                              "ipartition conductor.")

            # Update local volume groups
            ilvg = self._ilvg_operator.ilvg_get()
            try:
                rpcapi.ilvg_update_by_ihost(context,
                                            self._ihost_uuid,
                                            ilvg)
                self._inventory_reported.add(self.LVG)
            except exception.SysinvException:
                LOG.exception("Sysinv Agent exception updating ilvg "
                              "conductor.")

            # Update physical volumes
            ipv = self._ipv_operator.ipv_get()
            try:
                rpcapi.ipv_update_by_ihost(context,
                                           self._ihost_uuid,
                                           ipv)
                self._inventory_reported.add(self.PV)
            except exception.SysinvException:
                LOG.exception("Sysinv Agent exception updating ipv "
                              "conductor.")
@@ -69,13 +69,13 @@ class PartitionOperator(object):
        return sgdisk_part_info

    @utils.skip_udev_partition_probe
    def get_partition_info(self, device_path, device_node):
    def get_partition_info(self, device_path, device_node, skip_gpt_check=False):
        """Obtain all information needed for the partitions on a disk.
        :param: device_path: the disk's device path
        :param: device_node: the disk's device node
        :returns: list of partitions"""
        # Check that partition table format is GPT. Return 0 if not.
        if not utils.disk_is_gpt(device_node=device_node):
        if ((not utils.disk_is_gpt(device_node=device_node)) and (not skip_gpt_check)):
            LOG.debug("Format of disk node %s is not GPT." % device_node)
            return None

@@ -116,7 +116,7 @@ class PartitionOperator(object):

        return ipartitions

    def ipartition_get(self):
    def ipartition_get(self, skip_gpt_check=False):
        """Enumerate partitions
        :param self
        :returns list of partitions and attributes

@@ -136,7 +136,8 @@ class PartitionOperator(object):

        try:
            new_partitions = self.get_partition_info(device_path=device_path,
                                                     device_node=device_node)
                                                     device_node=device_node,
                                                     skip_gpt_check=skip_gpt_check)
        except IOError as e:
            LOG.error("Error getting new partitions for: %s. Reason: %s" %
                      (device_node, str(e)))
|
@ -36,7 +36,7 @@ class PVOperator(object):
|
|||
LOG.error("%s @ %s:%s" % (e, traceback.tb_frame.f_code.co_filename,
|
||||
traceback.tb_lineno))
|
||||
|
||||
def ipv_get(self, cinder_device=None):
|
||||
def ipv_get(self, cinder_device=None, get_rook_device=False):
|
||||
'''Enumerate physical volume topology based on:
|
||||
|
||||
:param self
|
||||
|
@ -57,6 +57,10 @@ class PVOperator(object):
|
|||
',pv_size,pv_pe_count,pv_pe_alloc_count'\
|
||||
' --units B --nosuffix --noheadings'
|
||||
|
||||
if get_rook_device:
|
||||
disable_filter = ' --config \'devices/global_filter=["a|.*|"]\''
|
||||
pvdisplay_command = pvdisplay_command + disable_filter
|
||||
|
||||
# Execute the command
|
||||
try:
|
||||
pvdisplay_process = subprocess.Popen(pvdisplay_command,
|
||||
|
@ -105,6 +109,9 @@ class PVOperator(object):
|
|||
self.handle_exception("Could not execute vgreduce: %s" % e)
|
||||
continue
|
||||
|
||||
if (get_rook_device and ("ceph-" not in row)):
|
||||
continue
|
||||
|
||||
# get the values of fields as strings
|
||||
values = row.split(';')
|
||||
values = [v.strip() for v in values]
|
||||
|
@ -129,6 +136,13 @@ class PVOperator(object):
|
|||
else:
|
||||
ipv.append(attr)
|
||||
|
||||
if not get_rook_device:
|
||||
rook_pv = self.ipv_get(get_rook_device=True)
|
||||
|
||||
for i in rook_pv:
|
||||
if i not in ipv:
|
||||
ipv.append(i)
|
||||
|
||||
LOG.debug("ipv= %s" % ipv)
|
||||
|
||||
return ipv
|
||||
|
|
|
@@ -274,22 +274,24 @@ class AgentAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):

         return retval

-    def disk_format_gpt(self, context, host_uuid, idisk_dict,
-                        is_cinder_device):
-        """Asynchronously, GPT format a disk.
+    def disk_prepare(self, context, host_uuid, idisk_dict,
+                     skip_format, is_cinder_device):
+        """Asynchronously, prepare a disk for system use.

         :param context: an admin context
         :param host_uuid: ihost uuid unique id
         :param idisk_dict: values for disk object
+        :param skip_format: bool value telling whether GPT formatting of the idisk should be skipped
         :param is_cinder_device: bool value tells if the idisk is for cinder
         :returns: pass or fail
         """

         return self.fanout_cast(
             context,
-            self.make_msg('disk_format_gpt',
+            self.make_msg('disk_prepare',
                           host_uuid=host_uuid,
                           idisk_dict=idisk_dict,
+                          skip_format=skip_format,
                           is_cinder_device=is_cinder_device))

     def update_host_memory(self, context, host_uuid):

@@ -317,3 +319,15 @@ class AgentAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
                                       'refresh_helm_repo_information'))

         return retval
+
+    def update_host_lvm(self, context, host_uuid):
+        """Synchronously, update the LVM physical volumes.
+
+        :param context: an admin context
+        :param host_uuid: ihost uuid unique id
+        :returns: pass or fail
+        """
+
+        return self.call(context,
+                         self.make_msg('update_host_lvm',
+                                       host_uuid=host_uuid))

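Note the transport difference between the two additions: disk_prepare uses fanout_cast (a broadcast to the agents that does not wait for a result), while update_host_lvm uses call (a blocking request/response). A toy illustration of the calling pattern only, not the oslo RPC implementation:

class ToyProxy:
    """Toy stand-in for the RPC proxy; real messaging goes through oslo rpc."""

    def make_msg(self, method, **kwargs):
        return {'method': method, 'args': kwargs}

    def fanout_cast(self, context, msg):
        # Asynchronous: broadcast to every agent and return immediately.
        print("cast to all agents:", msg['method'])

    def call(self, context, msg):
        # Synchronous: send to one service and block for the reply.
        print("call and wait:", msg['method'])
        return "pass"

api = ToyProxy()
api.fanout_cast(None, api.make_msg('disk_prepare', host_uuid='...'))
result = api.call(None, api.make_msg('update_host_lvm', host_uuid='...'))
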
@@ -67,8 +67,7 @@ class Root(base.APIBase):
             "management of physical servers. This includes inventory "
             "collection and configuration of hosts, ports, interfaces, CPUs, disk, "
             "memory, and system configuration. The API also supports "
-            "alarms and fault collection for the cloud itself as well "
-            "as the configuration of the cloud's SNMP interface. "
+            "alarms and fault collection for the cloud itself."
             )
         root.versions = [Version.convert('v1')]
         root.default_version = Version.convert('v1')

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013-2020 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #

@@ -25,7 +25,6 @@ from sysinv.api.controllers.v1 import address_pool
 from sysinv.api.controllers.v1 import base
 from sysinv.api.controllers.v1 import ceph_mon
 from sysinv.api.controllers.v1 import cluster
-from sysinv.api.controllers.v1 import community
 from sysinv.api.controllers.v1 import controller_fs
 from sysinv.api.controllers.v1 import cpu
 from sysinv.api.controllers.v1 import device_image

@@ -42,6 +41,7 @@ from sysinv.api.controllers.v1 import health
 from sysinv.api.controllers.v1 import helm_charts
 from sysinv.api.controllers.v1 import host
 from sysinv.api.controllers.v1 import kube_app
+from sysinv.api.controllers.v1 import kube_cluster
 from sysinv.api.controllers.v1 import kube_host_upgrade
 from sysinv.api.controllers.v1 import kube_upgrade
 from sysinv.api.controllers.v1 import kube_version

@@ -85,8 +85,8 @@ from sysinv.api.controllers.v1 import storage_file
 from sysinv.api.controllers.v1 import storage_external
 from sysinv.api.controllers.v1 import storage_tier
 from sysinv.api.controllers.v1 import storage_ceph_external
+from sysinv.api.controllers.v1 import storage_ceph_rook
 from sysinv.api.controllers.v1 import system
-from sysinv.api.controllers.v1 import trapdest
 from sysinv.api.controllers.v1 import upgrade
 from sysinv.api.controllers.v1 import user
 from sysinv.api.controllers.v1 import host_fs

@@ -136,12 +136,6 @@ class V1(base.APIBase):
     iprofile = [link.Link]
     "Links to the iprofile resource"

-    itrapdest = [link.Link]
-    "Links to the itrapdest node cluster resource"
-
-    icommunity = [link.Link]
-    "Links to the icommunity node cluster resource"
-
     iuser = [link.Link]
     "Links to the iuser resource"

@@ -181,6 +175,9 @@ class V1(base.APIBase):
     storage_ceph_external = [link.Link]
     "Links to the storage external ceph resource"

+    storage_ceph_rook = [link.Link]
+    "Links to the storage rook ceph resource"
+
     ceph_mon = [link.Link]
     "Links to the ceph mon resource"

@@ -256,6 +253,9 @@ class V1(base.APIBase):
     host_fs = [link.Link]
     "Links to the host_fs resource"

+    kube_clusters = [link.Link]
+    "Links to the kube_cluster resource"
+
     kube_versions = [link.Link]
     "Links to the kube_version resource"

@@ -418,22 +418,6 @@ class V1(base.APIBase):
                               bookmark=True)
                  ]

-        v1.itrapdest = [link.Link.make_link('self', pecan.request.host_url,
-                                            'itrapdest', ''),
-                        link.Link.make_link('bookmark',
-                                            pecan.request.host_url,
-                                            'itrapdest', '',
-                                            bookmark=True)
-                        ]
-
-        v1.icommunity = [link.Link.make_link('self', pecan.request.host_url,
-                                             'icommunity', ''),
-                         link.Link.make_link('bookmark',
-                                             pecan.request.host_url,
-                                             'icommunity', '',
-                                             bookmark=True)
-                         ]
-
         v1.iuser = [link.Link.make_link('self', pecan.request.host_url,
                                         'iuser', ''),
                     link.Link.make_link('bookmark',

@@ -537,6 +521,16 @@ class V1(base.APIBase):
                                  bookmark=True)
             ]

+        v1.storage_ceph_rook = [
+            link.Link.make_link('self',
+                                pecan.request.host_url,
+                                'storage_ceph_rook', ''),
+            link.Link.make_link('bookmark',
+                                pecan.request.host_url,
+                                'storage_ceph_rook', '',
+                                bookmark=True)
+        ]
+
         v1.ceph_mon = [link.Link.make_link('self',
                                            pecan.request.host_url,
                                            'ceph_mon', ''),

@@ -803,6 +797,13 @@ class V1(base.APIBase):
                                            'host_fs', '',
                                            bookmark=True)]

+        v1.kube_clusters = [link.Link.make_link('self', pecan.request.host_url,
+                                                'kube_clusters', ''),
+                            link.Link.make_link('bookmark',
+                                                pecan.request.host_url,
+                                                'kube_clusters', '',
+                                                bookmark=True)]
+
         v1.kube_versions = [link.Link.make_link('self', pecan.request.host_url,
                                                 'kube_versions', ''),
                             link.Link.make_link('bookmark',

@@ -874,8 +875,6 @@ class Controller(rest.RestController):
     idisks = disk.DiskController()
     partitions = partition.PartitionController()
     iprofile = profile.ProfileController()
-    itrapdest = trapdest.TrapDestController()
-    icommunity = community.CommunityController()
     iuser = user.UserController()
     idns = dns.DNSController()
     intp = ntp.NTPController()

@@ -890,6 +889,8 @@ class Controller(rest.RestController):
     storage_tiers = storage_tier.StorageTierController()
     storage_ceph_external = \
         storage_ceph_external.StorageCephExternalController()
+    storage_ceph_rook = \
+        storage_ceph_rook.StorageCephRookController()
     ceph_mon = ceph_mon.CephMonController()
     drbdconfig = drbdconfig.drbdconfigsController()
     addresses = address.AddressController()

@@ -921,6 +922,7 @@ class Controller(rest.RestController):
     datanetworks = datanetwork.DataNetworkController()
     interface_datanetworks = interface_datanetwork.InterfaceDataNetworkController()
     host_fs = host_fs.HostFsController()
+    kube_clusters = kube_cluster.KubeClusterController()
     kube_versions = kube_version.KubeVersionController()
     kube_upgrade = kube_upgrade.KubeUpgradeController()
     kube_host_upgrades = kube_host_upgrade.KubeHostUpgradeController()

@@ -15,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2015-2017 Wind River Systems, Inc.
+# Copyright (c) 2015-2021 Wind River Systems, Inc.
 #

@@ -55,6 +55,11 @@ ADDRPOOL_CONTROLLER1_ADDRESS_ID = 'controller1_address_id'
 ADDRPOOL_FLOATING_ADDRESS_ID = 'floating_address_id'
 ADDRPOOL_GATEWAY_ADDRESS_ID = 'gateway_address_id'

+# Address pools for the system controller in a subcloud are
+# allowed to be deleted/modified post install
+SYSTEM_CONTROLLER_ADDRPOOLS = ['system-controller-subnet',
+                               'system-controller-oam-subnet']
+

 class AddressPoolPatchType(types.JsonPatchType):
     """A complex type that represents a single json-patch operation."""

@@ -332,14 +337,17 @@ class AddressPoolController(rest.RestController):
             self._check_valid_range(network, start, end, ipset)
             ipset.update(netaddr.IPRange(start, end))

-    def _check_pool_readonly(self, address_pool_id):
-        networks = pecan.request.dbapi.networks_get_by_pool(address_pool_id)
-        # Pool is considered readonly after the initial configuration is
-        # complete. During bootstrap it should be modifiable even though
-        # it is allocated to a network.
-        if networks and cutils.is_initial_config_complete():
-            # network managed address pool, no changes permitted
-            raise exception.AddressPoolReadonly()
+    def _check_pool_readonly(self, addrpool):
+        # The system controller's network pools must remain writeable so that
+        # a subcloud can be re-homed to new system controllers.
+        if addrpool.name not in SYSTEM_CONTROLLER_ADDRPOOLS:
+            networks = pecan.request.dbapi.networks_get_by_pool(addrpool.id)
+            # Except for the system controller's pools, an address pool is
+            # considered readonly after the initial configuration is complete.
+            # During bootstrap it should be modifiable even though it is
+            # allocated to a network.
+            if networks and cutils.is_initial_config_complete():
+                # network managed address pool, no changes permitted
+                raise exception.AddressPoolReadonly()

     def _make_default_range(self, addrpool):
         ipset = netaddr.IPSet([addrpool['network'] + "/" + str(addrpool['prefix'])])

@@ -550,7 +558,7 @@ class AddressPoolController(rest.RestController):
         """Updates attributes of an IP address pool."""
         addrpool = self._get_one(address_pool_uuid)
         updates = self._get_updates(patch)
-        self._check_pool_readonly(addrpool.id)
+        self._check_pool_readonly(addrpool)
         self._validate_updates(addrpool, updates)
         return pecan.request.dbapi.address_pool_update(
             address_pool_uuid, updates)

@@ -560,11 +568,16 @@ class AddressPoolController(rest.RestController):
     def delete(self, address_pool_uuid):
         """Delete an IP address pool."""
         addrpool = self._get_one(address_pool_uuid)
-        self._check_pool_readonly(addrpool.id)
+        self._check_pool_readonly(addrpool)
         addresses = pecan.request.dbapi.addresses_get_by_pool(
             addrpool.id)
         if addresses:
-            if cutils.is_initial_config_complete():
+            # None of the initially configured address pools may be deleted,
+            # except the system controller's network pools, which can be
+            # deleted/re-added when re-homing a subcloud to new system
+            # controllers.
+            if cutils.is_initial_config_complete() and \
+                    (addrpool.name not in SYSTEM_CONTROLLER_ADDRPOOLS):
                 raise exception.AddressPoolInUseByAddresses()
             else:
                 # Must be a request as a result of network reconfiguration

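The readonly rule now keys off the pool name as well as network membership. Reduced to a pure predicate for illustration (a sketch, using the SYSTEM_CONTROLLER_ADDRPOOLS list defined above):

SYSTEM_CONTROLLER_ADDRPOOLS = ['system-controller-subnet',
                               'system-controller-oam-subnet']

def pool_is_readonly(pool_name, has_networks, initial_config_complete):
    """Boolean mirror of _check_pool_readonly, for illustration only."""
    if pool_name in SYSTEM_CONTROLLER_ADDRPOOLS:
        return False   # always modifiable, to allow subcloud re-homing
    return has_networks and initial_config_complete

# A management pool is locked once the system is configured ...
assert pool_is_readonly('management', True, True)
# ... but the system controller pools stay writeable for re-homing.
assert not pool_is_readonly('system-controller-subnet', True, True)
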
@@ -308,24 +308,25 @@ class CertificateController(rest.RestController):
         capabilities = system.capabilities

-        # platform-cert 'force' check for backward compatibility
-        if mode == constants.CERT_MODE_SSL:
+        if self._is_mode_supported_by_cert_manager(mode):
             # Call may not contain 'force' parameter
             # Note: cert-mon will pass a HTTP POST 'force'='true' param
-            force = pecan.request.POST.get('force')
-            if force == 'true':
-                force = True
-            else:
-                force = False
-            # if PLATFORM_CERT_SECRET_NAME secret is present in k8s, we
-            # assume that SSL cert is managed by cert-manager/cert-mon
+            force_param = pecan.request.POST.get('force')
+            force = force_param == "true"
+
+            plat_cert_name = self._get_secret_name_for_mode(mode)
+
+            # if the certificate secret is present in k8s, we
+            # assume that SSL cert is currently being managed by
+            # cert-manager/cert-mon
             managed_by_cm = self._kube_op.kube_get_secret(
-                constants.PLATFORM_CERT_SECRET_NAME,
+                plat_cert_name,
                 constants.CERT_NAMESPACE_PLATFORM_CERTS)

             if force is False and managed_by_cm is not None:
                 msg = "Certificate is currently being managed by cert-manager. \n" \
                       "To manage certificate with this command, first delete " \
-                      "the %s Certificate and Secret." % constants.PLATFORM_CERT_SECRET_NAME
+                      "the %s Certificate and Secret." % plat_cert_name
                 LOG.info(msg)
                 return dict(success="", error=msg)

@@ -572,6 +573,14 @@ class CertificateController(rest.RestController):

         return res

+    @staticmethod
+    def _get_secret_name_for_mode(mode):
+        return constants.CERT_MODE_TO_SECRET_NAME[mode]
+
+    @staticmethod
+    def _is_mode_supported_by_cert_manager(mode):
+        return mode in constants.CERT_MODES_SUPPORTED_CERT_MANAGER
+
     @cutils.synchronized(LOCK_NAME)
     @wsme_pecan.wsexpose(Certificate, types.uuid, status_code=200)
     def delete(self, certificate_uuid):

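The refactor above replaces the hardcoded ssl special case with table-driven lookups, so any certificate mode with a known secret name can be cert-manager managed. A sketch of the shape of the two new helpers; the dictionary keys and values here are hypothetical, not the real platform constants:

# Hypothetical stand-ins for constants.CERT_MODE_TO_SECRET_NAME and
# constants.CERT_MODES_SUPPORTED_CERT_MANAGER; values are examples only.
CERT_MODE_TO_SECRET_NAME = {
    'ssl': 'platform-cert-secret',
    'docker_registry': 'registry-cert-secret',
}
CERT_MODES_SUPPORTED_CERT_MANAGER = list(CERT_MODE_TO_SECRET_NAME)

def is_mode_supported_by_cert_manager(mode):
    # Any mode with a known secret name can be cert-manager managed.
    return mode in CERT_MODES_SUPPORTED_CERT_MANAGER

def get_secret_name_for_mode(mode):
    return CERT_MODE_TO_SECRET_NAME[mode]

assert is_mode_supported_by_cert_manager('ssl')
assert not is_mode_supported_by_cert_manager('openstack')
assert get_secret_name_for_mode('ssl') == 'platform-cert-secret'
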
@@ -1,238 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2013-2016 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# All Rights Reserved.
-#
-
-import jsonpatch
-import pecan
-from pecan import rest
-import wsme
-from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
-
-from oslo_db.exception import DBDuplicateEntry
-from oslo_db.exception import DBError
-from oslo_log import log
-from sysinv._i18n import _
-from sysinv.api.controllers.v1 import base
-from sysinv.api.controllers.v1 import collection
-from sysinv.api.controllers.v1 import link
-from sysinv.api.controllers.v1 import types
-from sysinv.api.controllers.v1 import utils as api_utils
-from sysinv.common import exception
-from sysinv.common import utils as cutils
-from sysinv import objects
-
-LOG = log.getLogger(__name__)
-
-
-class CommunityPatchType(types.JsonPatchType):
-    pass
-
-
-class Community(base.APIBase):
-    """API representation of a Community.
-
-    This class enforces type checking and value constraints, and converts
-    between the internal object model and the API representation of
-    a icommunity.
-    """
-
-    uuid = types.uuid
-    "The UUID of the icommunity"
-
-    community = wsme.wsattr(wtypes.text, mandatory=True)
-    "The community string of which the SNMP client is a member"
-
-    view = wtypes.text
-    "The SNMP MIB View"
-
-    access = wtypes.text
-    "The SNMP GET/SET access control"
-
-    links = [link.Link]
-    "A list containing a self link and associated community string links"
-
-    def __init__(self, **kwargs):
-        self.fields = objects.community.fields.keys()
-        for k in self.fields:
-            setattr(self, k, kwargs.get(k))
-
-    @classmethod
-    def convert_with_links(cls, rpc_icommunity, expand=True):
-        minimum_fields = ['id', 'uuid', 'community',
-                          'view', 'access']
-
-        fields = minimum_fields if not expand else None
-
-        icomm = Community.from_rpc_object(rpc_icommunity, fields)
-
-        return icomm
-
-
-class CommunityCollection(collection.Collection):
-    """API representation of a collection of icommunity."""
-
-    icommunity = [Community]
-    "A list containing icommunity objects"
-
-    def __init__(self, **kwargs):
-        self._type = 'icommunity'
-
-    @classmethod
-    def convert_with_links(cls, icommunity, limit, url=None,
-                           expand=False, **kwargs):
-        collection = CommunityCollection()
-        collection.icommunity = [Community.convert_with_links(ch, expand)
-                                 for ch in icommunity]
-        # url = url or None
-        collection.next = collection.get_next(limit, url=url, **kwargs)
-        return collection
-
-
-LOCK_NAME = 'CommunityController'
-
-
-class CommunityController(rest.RestController):
-    """REST controller for icommunity."""
-
-    _custom_actions = {
-        'detail': ['GET'],
-    }
-
-    def _get_icommunity_collection(self, marker, limit, sort_key, sort_dir,
-                                   expand=False, resource_url=None):
-        limit = api_utils.validate_limit(limit)
-        sort_dir = api_utils.validate_sort_dir(sort_dir)
-        marker_obj = None
-        if marker:
-            marker_obj = objects.community.get_by_uuid(pecan.request.context,
-                                                       marker)
-        icomm = pecan.request.dbapi.icommunity_get_list(limit, marker_obj,
-                                                        sort_key=sort_key,
-                                                        sort_dir=sort_dir)
-        return CommunityCollection.convert_with_links(icomm, limit,
-                                                      url=resource_url,
-                                                      expand=expand,
-                                                      sort_key=sort_key,
-                                                      sort_dir=sort_dir)
-
-    @wsme_pecan.wsexpose(CommunityCollection, types.uuid,
-                         int, wtypes.text, wtypes.text)
-    def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
-        """Retrieve a list of icommunity.
-
-        :param marker: pagination marker for large data sets.
-        :param limit: maximum number of resources to return in a single result.
-        :param sort_key: column to sort results by. Default: id.
-        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
-        """
-        return self._get_icommunity_collection(marker, limit, sort_key, sort_dir)
-
-    @wsme_pecan.wsexpose(CommunityCollection, types.uuid, int,
-                         wtypes.text, wtypes.text)
-    def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
-        """Retrieve a list of icommunity with detail.
-
-        :param marker: pagination marker for large data sets.
-        :param limit: maximum number of resources to return in a single result.
-        :param sort_key: column to sort results by. Default: id.
-        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
-        """
-        # /detail should only work agaist collections
-        parent = pecan.request.path.split('/')[:-1][-1]
-        if parent != "icommunity":
-            raise exception.HTTPNotFound
-
-        expand = True
-        resource_url = '/'.join(['icommunity', 'detail'])
-        return self._get_icommunity_collection(marker, limit, sort_key, sort_dir,
-                                               expand, resource_url)
-
-    @wsme_pecan.wsexpose(Community, wtypes.text)
-    def get_one(self, name):
-        """Retrieve information about the given icommunity.
-
-        :param icommunity_uuid: UUID of a icommunity.
-        """
-        rpc_icommunity = objects.community.get_by_name(
-            pecan.request.context, name)
-        return Community.convert_with_links(rpc_icommunity)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(Community, body=Community)
-    def post(self, icommunity):
-        """Create a new icommunity.
-
-        :param icommunity: a icommunity within the request body.
-        """
-        try:
-            new_icommunity = \
-                pecan.request.dbapi.icommunity_create(icommunity.as_dict())
-        except DBDuplicateEntry as e:
-            LOG.error(e)
-            raise wsme.exc.ClientSideError(_(
-                "Rejected: Cannot add %s, it is an existing community.") % icommunity.as_dict().get('community'))
-        except DBError as e:
-            LOG.error(e)
-            raise wsme.exc.ClientSideError(_(
-                "Database check error on community %s create.") % icommunity.as_dict().get('community'))
-        except Exception as e:
-            LOG.error(e)
-            raise wsme.exc.ClientSideError(_(
-                "Database error on community %s create. See log for details.") % icommunity.as_dict().get('community'))
-
-        # update snmpd.conf
-        pecan.request.rpcapi.update_snmp_config(pecan.request.context)
-        return icommunity.convert_with_links(new_icommunity)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme.validate(types.uuid, [CommunityPatchType])
-    @wsme_pecan.wsexpose(Community, types.uuid, body=[CommunityPatchType])
-    def patch(self, icommunity_uuid, patch):
-        """Update an existing icommunity.
-
-        :param icommunity_uuid: UUID of a icommunity.
-        :param patch: a json PATCH document to apply to this icommunity.
-        """
-        rpc_icommunity = objects.community.get_by_uuid(pecan.request.context,
-                                                       icommunity_uuid)
-        try:
-            icomm = Community(**jsonpatch.apply_patch(rpc_icommunity.as_dict(),
-                                                      jsonpatch.JsonPatch(patch)))
-        except api_utils.JSONPATCH_EXCEPTIONS as e:
-            raise exception.PatchError(patch=patch, reason=e)
-
-        # Update only the fields that have changed
-        comm = ""
-        for field in objects.community.fields:
-            if rpc_icommunity[field] != getattr(icomm, field):
-                rpc_icommunity[field] = getattr(icomm, field)
-                if field == 'community':
-                    comm = rpc_icommunity[field]
-
-        rpc_icommunity.save()
-
-        if comm:
-            LOG.debug("Modify community: uuid (%s) community (%s) ",
-                      icommunity_uuid, comm)
-
-        return Community.convert_with_links(rpc_icommunity)
-
-    @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
-    def delete(self, name):
-        """Delete a icommunity.
-
-        :param name: community name of a icommunity.
-        """
-        pecan.request.dbapi.icommunity_destroy(name)
-        # update snmpd.conf
-        pecan.request.rpcapi.update_snmp_config(pecan.request.context)

@@ -84,6 +84,9 @@ class CPU(base.APIBase):
     function = wtypes.text
     "Represent the function of the icpu"

+    cpulist = wtypes.text
+    "The list of CPUs for this function"
+
     num_cores_on_processor0 = wtypes.text
     "The number of cores on processor 0"

@@ -126,6 +129,8 @@ class CPU(base.APIBase):
         # API only attributes
         self.fields.append('function')
         setattr(self, 'function', kwargs.get('function', None))
+        self.fields.append('cpulist')
+        setattr(self, 'cpulist', kwargs.get('cpulist', None))
         self.fields.append('num_cores_on_processor0')
         setattr(self, 'num_cores_on_processor0',
                 kwargs.get('num_cores_on_processor0', None))

@@ -208,6 +208,29 @@ def get_cpu_counts(host):
     return counts


+def append_ht_sibling(host, cpu_list):
+    """Append to cpu_list the hyperthread siblings for the cpus in the list"""
+    # TODO: Add UTs for this.
+
+    # There's probably a more efficient way to do this.
+    cpus_to_add = []
+    for cpu_num in cpu_list:
+        # Get node/core for specified cpu number
+        for cpu in host.cpus:
+            if cpu.cpu == cpu_num:
+                # We've found the cpu of interest, now check for siblings
+                for cpu2 in host.cpus:
+                    if cpu2.numa_node == cpu.numa_node and \
+                            cpu2.core == cpu.core and \
+                            cpu2.thread != cpu.thread:
+                        cpus_to_add.append(cpu2.cpu)
+                        break
+                break
+    # Add in the HT siblings, then remove any duplicates.
+    cpus_to_add.extend(cpu_list)
+    return list(set(cpus_to_add))
+
+
 def init_cpu_counts(host):
     """Create empty data structures to track CPU assignments by socket and
     function."""

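A quick worked example of append_ht_sibling on a fake host with two hyperthreaded cores; the function is restated in condensed form so the snippet runs standalone, and the cpu objects are reduced to namedtuples:

from collections import namedtuple

Cpu = namedtuple('Cpu', 'cpu numa_node core thread')

def append_ht_sibling(host, cpu_list):
    # Condensed copy of the function above, for the worked example.
    cpus_to_add = []
    for cpu_num in cpu_list:
        for cpu in host.cpus:
            if cpu.cpu == cpu_num:
                for cpu2 in host.cpus:
                    if (cpu2.numa_node == cpu.numa_node and
                            cpu2.core == cpu.core and
                            cpu2.thread != cpu.thread):
                        cpus_to_add.append(cpu2.cpu)
                        break
                break
    cpus_to_add.extend(cpu_list)
    return list(set(cpus_to_add))

class FakeHost(object):
    # 2 cores x 2 threads on one numa node: cpus (0,2) and (1,3) are siblings.
    cpus = [Cpu(0, 0, 0, 0), Cpu(1, 0, 1, 0),
            Cpu(2, 0, 0, 1), Cpu(3, 0, 1, 1)]

# Requesting cpu 1 pulls in its sibling cpu 3; duplicates are removed.
assert sorted(append_ht_sibling(FakeHost(), [1])) == [1, 3]
assert sorted(append_ht_sibling(FakeHost(), [0, 2])) == [0, 2]
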
@@ -249,8 +272,22 @@ def restructure_host_cpu_data(host):
         host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))


-def check_core_allocations(host, cpu_counts):
+def check_core_allocations(host, cpu_counts, cpu_lists=None):
     """Check that minimum and maximum core values are respected."""

+    if cpu_lists:
+        # Verify no overlaps in cpulists for different functions. Not all
+        # functions are guaranteed to be present as keys in cpu_lists.
+        cpulist = []
+        for function in CORE_FUNCTIONS:
+            functionlist = cpu_lists.get(function, [])
+            if set(cpulist).intersection(functionlist):
+                raise wsme.exc.ClientSideError(
+                    "Some CPUs are specified for more than one function.")
+            cpulist.extend(functionlist)
+
+    # NOTE: contrary to the variable names, these are actually logical CPUs
+    # rather than cores, so if hyperthreading is enabled they're SMT siblings.
     total_platform_cores = 0
     total_vswitch_cores = 0
     total_shared_cores = 0

@@ -272,7 +309,15 @@ def check_core_allocations(host, cpu_counts):
         total_shared_cores += shared_cores
         total_isolated_cores += isolated_cores

-    # Validate Platform cores
+    # Add any cpus specified via ranges to the totals.
+    # Note: can't specify by both count and range for the same function.
+    if cpu_lists:
+        total_platform_cores += len(cpu_lists.get(constants.PLATFORM_FUNCTION, []))
+        total_vswitch_cores += len(cpu_lists.get(constants.VSWITCH_FUNCTION, []))
+        total_shared_cores += len(cpu_lists.get(constants.SHARED_FUNCTION, []))
+        total_isolated_cores += len(cpu_lists.get(constants.ISOLATED_FUNCTION, []))
+
+    # Validate Platform cores (actually logical CPUs)
     if ((constants.CONTROLLER in host.subfunctions) and
             (constants.WORKER in host.subfunctions)):
         if total_platform_cores < 2:

@@ -282,7 +327,7 @@ def check_core_allocations(host, cpu_counts):
         raise wsme.exc.ClientSideError("%s must have at least one core." %
                                        constants.PLATFORM_FUNCTION)

-    # Validate shared cores
+    # Validate shared cores (actually logical CPUs)
     for s in range(0, len(host.nodes)):
         shared_cores = cpu_counts[s][constants.SHARED_FUNCTION]
         if host.hyperthreading:

@@ -292,7 +337,7 @@ def check_core_allocations(host, cpu_counts):
                 '%s cores are limited to 1 per processor.'
                 % constants.SHARED_FUNCTION)

-    # Validate vswitch cores
+    # Validate vswitch cores (actually logical CPUs)
     if total_vswitch_cores != 0:
         vswitch_type = cutils.get_vswitch_type(pecan.request.dbapi)
         if constants.VSWITCH_TYPE_NONE == vswitch_type:

@@ -308,7 +353,7 @@ def check_core_allocations(host, cpu_counts):
             "The %s function can only be assigned up to %s cores." %
             (constants.VSWITCH_FUNCTION.lower(), VSWITCH_MAX_CORES))

-    # Validate Isolated cores:
+    # Validate Isolated cores (actually logical CPUs):
     # - Prevent isolated core assignment if vswitch or shared cores are
     #   allocated.
     if total_isolated_cores > 0:

@@ -326,31 +371,57 @@ def check_core_allocations(host, cpu_counts):
                 constants.APPLICATION_FUNCTION)


-def update_core_allocations(host, cpu_counts):
+def node_from_cpu(host, cpu_num):
+    for cpu in host.cpus:
+        if cpu.cpu == cpu_num:
+            return cpu.numa_node
+    raise wsme.exc.ClientSideError("Specified CPU %s is invalid." % cpu_num)
+
+
+def update_core_allocations(host, cpu_counts, cpulists=None):
     """Update the per socket/function cpu list based on the newly requested
     counts."""
+
     # Remove any previous assignments
     for s in range(0, len(host.nodes)):
         for f in CORE_FUNCTIONS:
             host.cpu_functions[s][f] = []
-    # Set new assignments
+
+    # Make per-numa-node lists of available CPUs
+    cpu_lists = {}
     for s in range(0, len(host.nodes)):
-        cpu_list = host.cpu_lists[s] if s in host.cpu_lists else []
+        cpu_lists[s] = list(host.cpu_lists[s]) if s in host.cpu_lists else []
+
+    # We need to reserve all of the cpulist-specified CPUs first, then
+    # reserve by counts.
+    for function in CORE_FUNCTIONS:
+        if cpulists and function in cpulists:
+            for cpu in cpulists[function]:
+                node = node_from_cpu(host, cpu)
+                host.cpu_functions[node][function].append(cpu)
+                cpu_lists[node].remove(cpu)
+
+    for s in range(0, len(host.nodes)):
         # Reserve for the platform first
         for i in range(0, cpu_counts[s][constants.PLATFORM_FUNCTION]):
             host.cpu_functions[s][constants.PLATFORM_FUNCTION].append(
-                cpu_list.pop(0))
+                cpu_lists[s].pop(0))

         # Reserve for the vswitch next
         for i in range(0, cpu_counts[s][constants.VSWITCH_FUNCTION]):
             host.cpu_functions[s][constants.VSWITCH_FUNCTION].append(
-                cpu_list.pop(0))
+                cpu_lists[s].pop(0))

         # Reserve for the shared next
         for i in range(0, cpu_counts[s][constants.SHARED_FUNCTION]):
             host.cpu_functions[s][constants.SHARED_FUNCTION].append(
-                cpu_list.pop(0))
+                cpu_lists[s].pop(0))

         # Reserve for the isolated next
         for i in range(0, cpu_counts[s][constants.ISOLATED_FUNCTION]):
             host.cpu_functions[s][constants.ISOLATED_FUNCTION].append(
-                cpu_list.pop(0))
+                cpu_lists[s].pop(0))

         # Assign the remaining cpus to the default function for this host
-        host.cpu_functions[s][get_default_function(host)] += cpu_list
+        host.cpu_functions[s][get_default_function(host)] += cpu_lists[s]

     return

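The overlap check added to check_core_allocations is the interesting part: it walks the functions in a fixed order, accumulating already-claimed CPUs and rejecting any intersection. Worked in isolation (a sketch; the CORE_FUNCTIONS values here stand in for the real constants):

CORE_FUNCTIONS = ['Platform', 'vSwitch', 'Shared', 'Application-isolated']

def find_overlap(cpu_lists):
    """Return the first set of CPUs claimed by more than one function."""
    claimed = []
    for function in CORE_FUNCTIONS:
        functionlist = cpu_lists.get(function, [])
        overlap = set(claimed).intersection(functionlist)
        if overlap:
            return overlap
        claimed.extend(functionlist)
    return set()

assert find_overlap({'Platform': [0, 1], 'vSwitch': [2]}) == set()
# cpu 1 requested for both platform and vswitch -> rejected
assert find_overlap({'Platform': [0, 1], 'vSwitch': [1, 2]}) == {1}
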
@@ -376,14 +376,16 @@ class DiskController(rest.RestController):
         rpc_idisk = objects.disk.get_by_uuid(
             pecan.request.context, idisk_uuid)

-        format_disk = True
+        format_supported = True
         for p in patch:
+            if p['path'] == '/skip_formatting':
+                skip_format = p['value'].lower() == 'true'
             if p['path'] == '/partition_table':
                 value = p['value']
                 if value != constants.PARTITION_TABLE_GPT:
-                    format_disk = False
+                    format_supported = False

-        if not format_disk:
+        if not format_supported:
             raise wsme.exc.ClientSideError(
                 _("Only %s disk formatting is supported." %
                   constants.PARTITION_TABLE_GPT))

@@ -392,10 +394,11 @@ class DiskController(rest.RestController):

         is_cinder_device = False
         rpcapi = agent_rpcapi.AgentAPI()
-        rpcapi.disk_format_gpt(pecan.request.context,
-                               rpc_idisk.get('ihost_uuid'),
-                               rpc_idisk.as_dict(),
-                               is_cinder_device)
+        rpcapi.disk_prepare(pecan.request.context,
+                            rpc_idisk.get('ihost_uuid'),
+                            rpc_idisk.as_dict(),
+                            skip_format,
+                            is_cinder_device)


 def _semantic_checks_format(idisk):

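The controller reads skip_formatting out of the JSON-patch document as a string flag. A minimal sketch of that parsing; the patch format is the standard list of {'path': ..., 'value': ...} operations used throughout this API, and the 'gpt' literal stands in for constants.PARTITION_TABLE_GPT:

def parse_disk_patch(patch):
    """Extract skip_format and validate the partition table type."""
    skip_format = False
    format_supported = True
    for p in patch:
        if p['path'] == '/skip_formatting':
            skip_format = p['value'].lower() == 'true'
        if p['path'] == '/partition_table':
            # Only GPT is accepted; anything else is rejected upstream.
            format_supported = p['value'] == 'gpt'
    return skip_format, format_supported

assert parse_disk_patch([{'path': '/skip_formatting', 'value': 'True'},
                         {'path': '/partition_table', 'value': 'gpt'}]) == (True, True)
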
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2020 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0

@@ -95,6 +95,8 @@ from sysinv.common import utils as cutils
 from sysinv.common.storage_backend_conf import StorageBackendConfig
 from sysinv.common import health

+from sysinv.openstack.common.rpc import common as rpc_common
+
 LOG = log.getLogger(__name__)
 KEYRING_BM_SERVICE = "BM"
 ERR_CODE_LOCK_SOLE_SERVICE_PROVIDER = "-1003"

@@ -231,7 +233,8 @@ class HostStatesController(rest.RestController):
         Example:
         capabilities=[{'function': 'platform', 'sockets': [{'0': 1}, {'1': 0}]},
                       {'function': 'vswitch', 'sockets': [{'0': 2}]},
-                      {'function': 'shared', 'sockets': [{'0': 1}, {'1': 1}]}]
+                      {'function': 'shared', 'sockets': [{'0': 1}, {'1': 1}]},
+                      {'function': 'application-isolated', 'cpulist': '3-5,6'}]
         """
         LOG.info("host_cpus_modify host_uuid=%s capabilities=%s" %
                  (host_uuid, capabilities))

@@ -242,16 +245,28 @@ class HostStatesController(rest.RestController):
         ihost.nodes = pecan.request.dbapi.inode_get_by_ihost(ihost.uuid)
         num_nodes = len(ihost.nodes)

+        # Query the database to get the current set of CPUs
+        ihost.cpus = pecan.request.dbapi.icpu_get_by_ihost(ihost.uuid)
+
         # Perform basic sanity on the input
         for icap in capabilities:
             specified_function = icap.get('function', None)
-            specified_sockets = icap.get('sockets', None)
-            if not specified_function or not specified_sockets:
+            specified_sockets = icap.get('sockets', [])
+            specified_cpulist = icap.get('cpulist', None)
+
+            if specified_sockets and specified_cpulist:
+                raise wsme.exc.ClientSideError(
+                    _('host %s: socket=%s and cpulist=%s may not both be specified') %
+                    (host_uuid, specified_sockets, specified_cpulist))
+
+            if not specified_function or not (specified_sockets or specified_cpulist):
                 raise wsme.exc.ClientSideError(
-                    _('host %s: cpu function=%s or socket=%s not specified '
-                      'for host %s.') % (host_uuid,
-                                         specified_function,
-                                         specified_sockets))
+                    _('host %s: cpu function=%s or (socket=%s and cpulist=%s) '
+                      'not specified') % (host_uuid,
+                                          specified_function,
+                                          specified_sockets,
+                                          specified_cpulist))

             for specified_socket in specified_sockets:
                 socket, value = specified_socket.items()[0]
                 if int(socket) >= num_nodes:

@@ -262,22 +277,35 @@ class HostStatesController(rest.RestController):
             raise wsme.exc.ClientSideError(
                 _('Specified cpu values must be non-negative.'))

-        # Query the database to get the current set of CPUs and then
-        # organize the data by socket and function for convenience.
-        ihost.cpus = pecan.request.dbapi.icpu_get_by_ihost(ihost.uuid)
+        # Ensure that the cpulist is valid if set
+        if specified_cpulist:
+            # make a list of CPU numbers (which are not necessarily contiguous)
+            host_cpus = [ihost_cpu.cpu for ihost_cpu in ihost.cpus]
+            cpulist = cutils.parse_range_set(specified_cpulist)
+            if max(cpulist) > max(host_cpus):
+                raise wsme.exc.ClientSideError(
+                    _('Specified cpulist contains nonexistent CPUs.'))
+
+        # organize the cpus by socket and function for convenience.
         cpu_utils.restructure_host_cpu_data(ihost)

         # Get the CPU counts for each socket and function for this host
         cpu_counts = cpu_utils.get_cpu_counts(ihost)

-        # Update the CPU counts based on the provided values
+        cpu_lists = {}
+
+        # Update the CPU counts and cpulists based on the provided values
         for cap in capabilities:
             function = cap.get('function', None)
             # Normalize the function input
             for const_function in constants.CPU_FUNCTIONS:
                 if const_function.lower() == function.lower():
                     function = const_function
-            sockets = cap.get('sockets', None)
+            sockets = cap.get('sockets', [])
+            # If this function is specified via cpulist, reset its count to zero.
+            if not sockets:
+                for numa_node in cpu_counts:
+                    cpu_counts[numa_node][function] = 0
             for numa in sockets:
                 numa_node, value = numa.items()[0]
                 numa_node = int(numa_node)

@@ -286,11 +314,18 @@ class HostStatesController(rest.RestController):
                     value *= 2
                 cpu_counts[numa_node][function] = value

+            # Store the cpu ranges per CPU function as well, if any exist
+            cpu_range = cap.get('cpulist', None)
+            cpu_list = cutils.parse_range_set(cpu_range)
+            # Uncomment the following line to add any missing HT siblings
+            # cpu_list = cpu_utils.append_ht_sibling(ihost, cpu_list)
+            cpu_lists[function] = cpu_list
+
         # Semantic check to ensure the minimum/maximum values are enforced
-        cpu_utils.check_core_allocations(ihost, cpu_counts)
+        cpu_utils.check_core_allocations(ihost, cpu_counts, cpu_lists)

         # Update cpu assignments to new values
-        cpu_utils.update_core_allocations(ihost, cpu_counts)
+        cpu_utils.update_core_allocations(ihost, cpu_counts, cpu_lists)

         for cpu in ihost.cpus:
             function = cpu_utils.get_cpu_function(ihost, cpu)

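cutils.parse_range_set turns the cpulist string into explicit CPU numbers; per the docstring example above, '3-5,6' expands to [3, 4, 5, 6]. A hedged reimplementation for illustration only, since the real helper lives in sysinv.common.utils:

def parse_range_set(range_string):
    """Expand a cpulist like '3-5,6' into [3, 4, 5, 6]."""
    if not range_string:
        return []
    cpus = set()
    for chunk in range_string.split(','):
        if '-' in chunk:
            first, last = chunk.split('-')
            cpus.update(range(int(first), int(last) + 1))
        else:
            cpus.add(int(chunk))
    return sorted(cpus)

assert parse_range_set('3-5,6') == [3, 4, 5, 6]
assert parse_range_set(None) == []
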
@@ -2068,17 +2103,6 @@ class HostController(rest.RestController):
         ihost_ret = pecan.request.rpcapi.configure_ihost(
             pecan.request.context, ihost_obj)

-        # Trigger a system app reapply if the host has been unlocked.
-        # Only trigger the reapply if it is not during restore and the
-        # openstack app is applied
-        if (cutils.is_openstack_applied(pecan.request.dbapi) and
-                not os.path.isfile(tsc.RESTORE_IN_PROGRESS_FLAG) and
-                patched_ihost.get('action') in
-                [constants.UNLOCK_ACTION, constants.FORCE_UNLOCK_ACTION]):
-            pecan.request.rpcapi.evaluate_app_reapply(
-                pecan.request.context,
-                constants.HELM_APP_OPENSTACK)
-
         pecan.request.dbapi.ihost_update(
             ihost_obj['uuid'], {'capabilities': ihost_obj['capabilities']})

@@ -2087,6 +2111,18 @@ class HostController(rest.RestController):

         hostupdate.notify_mtce = True

+        # Evaluate app reapply on lock/unlock/swact/reinstall
+        if (not os.path.isfile(tsc.RESTORE_IN_PROGRESS_FLAG) and
+                patched_ihost.get('action') in
+                [constants.LOCK_ACTION, constants.FORCE_LOCK_ACTION,
+                 constants.UNLOCK_ACTION, constants.FORCE_UNLOCK_ACTION,
+                 constants.SWACT_ACTION, constants.FORCE_SWACT_ACTION,
+                 constants.REINSTALL_ACTION]):
+            pecan.request.rpcapi.evaluate_apps_reapply(
+                pecan.request.context,
+                trigger={'type': patched_ihost.get('action'),
+                         'configure_required': True if hostupdate.configure_required else False})
+
         pecan.request.dbapi.ihost_update(ihost_obj['uuid'],
                                          {'capabilities': ihost_obj['capabilities']})

@@ -2120,6 +2156,11 @@ class HostController(rest.RestController):
             new_ihost_mtc['action'] = constants.UNLOCK_ACTION

+        if new_ihost_mtc['operation'] == 'add':
+            # Evaluate apps reapply on a new host
+            pecan.request.rpcapi.evaluate_apps_reapply(
+                pecan.request.context,
+                trigger={'type': constants.APP_EVALUATE_REAPPLY_TYPE_HOST_ADD})
+
         mtc_response = mtce_api.host_add(
             self._api_token, self._mtc_address, self._mtc_port,
             new_ihost_mtc,

@@ -2450,12 +2491,14 @@ class HostController(rest.RestController):
                 ceph_mons[0].uuid, {'device_path': None}
             )

+        remove_from_cluster = True if ihost.invprovision == constants.PROVISIONED else False
+
         # Delete the stor entries associated with this host
         istors = pecan.request.dbapi.istor_get_by_ihost(ihost['uuid'])

         for stor in istors:
             try:
-                self.istors.delete_stor(stor.uuid)
+                self.istors.delete_stor(stor.uuid, remove_from_cluster)
             except Exception as e:
                 # Do not destroy the ihost if the stor cannot be deleted.
                 LOG.exception(e)

@@ -2529,19 +2572,12 @@ class HostController(rest.RestController):

         pecan.request.dbapi.ihost_destroy(ihost_id)

-        # Check if platform apps need to be reapplied
-        if personality == constants.CONTROLLER:
-            for app_name in constants.HELM_APPS_PLATFORM_MANAGED:
-                if cutils.is_app_applied(pecan.request.dbapi, app_name):
-                    pecan.request.rpcapi.evaluate_app_reapply(
-                        pecan.request.context, app_name)
-
-        # If the host being removed was an openstack worker node, check to see
-        # if a reapply is needed
-        if openstack_worker and cutils.is_app_applied(
-                pecan.request.dbapi, constants.HELM_APP_OPENSTACK):
-            pecan.request.rpcapi.evaluate_app_reapply(
-                pecan.request.context, constants.HELM_APP_OPENSTACK)
+        # Check if platform apps need to be reapplied after host delete
+        pecan.request.rpcapi.evaluate_apps_reapply(
+            pecan.request.context,
+            trigger={'type': constants.APP_EVALUATE_REAPPLY_TYPE_HOST_DELETE,
+                     'openstack_worker': True if openstack_worker else False,
+                     'personality': personality})

     def _notify_mtce_host_delete(self, ihost):

@@ -2621,6 +2657,11 @@ class HostController(rest.RestController):
         loads = pecan.request.dbapi.load_get_list()
         new_target_load = cutils.get_imported_load(loads)
         rpc_ihost = objects.host.get_by_uuid(pecan.request.context, uuid)

+        if rpc_ihost.personality == constants.EDGEWORKER:
+            raise wsme.exc.ClientSideError(_(
+                "host-upgrade rejected: Not supported for EDGEWORKER node."))
+
         simplex = (utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX)
         # If this is a simplex system skip this check, there's no other nodes
         if simplex:

@@ -2701,6 +2742,10 @@ class HostController(rest.RestController):
         new_target_load = cutils.get_active_load(loads)
         rpc_ihost = objects.host.get_by_uuid(pecan.request.context, uuid)

+        if rpc_ihost.personality == constants.EDGEWORKER:
+            raise wsme.exc.ClientSideError(_(
+                "host-downgrade rejected: Not supported for EDGEWORKER node."))
+
         disable_storage_monitor = False

         simplex = (utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX)

@@ -2724,8 +2769,14 @@ class HostController(rest.RestController):
             self._semantic_check_rollback()
             if StorageBackendConfig.has_backend_configured(
                     pecan.request.dbapi, constants.CINDER_BACKEND_CEPH):
-                disable_storage_monitor = True
-            open(tsc.UPGRADE_ROLLBACK_FLAG, "w").close()
+                # The elif block ensures this is a duplex env.
+                # We do not set disable_storage_monitor True for AIO-DX.
+                if not cutils.is_aio_duplex_system(pecan.request.dbapi):
+                    disable_storage_monitor = True
+            # the upgrade rollback flag can only be created by root, so
+            # send an rpc request to sysinv-conductor to create the flag
+            pecan.request.rpcapi.update_controller_rollback_flag(
+                pecan.request.context)
         elif rpc_ihost.hostname == constants.CONTROLLER_1_HOSTNAME:
             self._check_host_load(constants.CONTROLLER_0_HOSTNAME,
                                   new_target_load)

@@ -2761,24 +2812,6 @@ class HostController(rest.RestController):
                     "host-downgrade rejected: Upgrade not in %s state." %
                     constants.UPGRADE_ABORTING))

-        if rpc_ihost.hostname == constants.CONTROLLER_1_HOSTNAME:
-            # Clear upgrade flags so controller-1 will not upgrade
-            # after install. This flag is guaranteed to be written on
-            # controller-0, since controller-1 must be locked to run
-            # the host-downgrade command.
-            try:
-                os.remove(tsc.CONTROLLER_UPGRADE_FLAG)
-            except OSError:
-                LOG.exception("Failed to remove upgrade flag")
-            try:
-                os.remove(tsc.CONTROLLER_UPGRADE_COMPLETE_FLAG)
-            except OSError:
-                LOG.exception("Failed to remove upgrade complete flag")
-            try:
-                os.remove(tsc.CONTROLLER_UPGRADE_FAIL_FLAG)
-            except OSError:
-                LOG.exception("Failed to remove upgrade fail flag")
-
         # Check for new hardware since upgrade-start
         force = body.get('force', False) is True
         self._semantic_check_downgrade_refresh(upgrade, rpc_ihost, force)

@@ -2792,6 +2825,11 @@ class HostController(rest.RestController):
             pecan.request.rpcapi.kill_ceph_storage_monitor(
                 pecan.request.context)

+        # Remove the host manifest. This is similar to the process taken
+        # during host-reinstall. The manifest needs to be removed to prevent
+        # the host from running kubeadm prematurely.
+        pecan.request.rpcapi.remove_host_config(pecan.request.context, uuid)
+
         self._update_load(uuid, body, new_target_load)

         return Host.convert_with_links(rpc_ihost)

@@ -3007,7 +3045,9 @@ class HostController(rest.RestController):

     def _validate_hostname(self, hostname, personality):

-        if personality and personality == constants.WORKER:
+        if personality and \
+                (personality == constants.WORKER or
+                 personality == constants.EDGEWORKER):
             # Fix of invalid hostnames
             err_tl = 'Name restricted to at most 255 characters.'
             err_ic = 'Name may only contain letters, ' \

@@ -3357,41 +3397,61 @@ class HostController(rest.RestController):

         self._check_sriovdp_interface_datanets(interface)

-    def _semantic_check_fpga_fec_device(self, host, dev, force_unlock=False):
+    def _semantic_check_acclr_fec_device(self, host, dev, force_unlock=False):
         """
-        Perform semantic checks on an FPGA FEC device.
+        Perform semantic checks on an FEC device.
         """
         if (force_unlock or
-                dev.pdevice_id != device.PCI_DEVICE_ID_FPGA_INTEL_5GNR_FEC_PF):
+                dev.pdevice_id not in device.SRIOV_ENABLED_FEC_DEVICE_IDS):
             return

-        sriov_numvfs = dev.sriov_numvfs
-        if not sriov_numvfs:
-            return
-        if (dev.sriov_vfs_pci_address and
-                sriov_numvfs == len(dev.sriov_vfs_pci_address.split(','))):
+        try:
+            sriov_numvfs = int(dev.sriov_numvfs)
+        except TypeError:
+            sriov_numvfs = 0
+
+        if dev.extra_info:
+            extra_info = ast.literal_eval(dev.extra_info)
+            expected_numvfs = int(extra_info['expected_numvfs'])
+            if sriov_numvfs != expected_numvfs:
+                msg = (_("Expecting sriov_numvfs=%d for FEC device pciaddr=%s. "
+                         "Please wait a few minutes for inventory update and "
+                         "retry host-unlock." % (expected_numvfs, dev.pciaddr)))
+                LOG.info(msg)
+                pecan.request.rpcapi.update_sriov_config(
+                    pecan.request.context,
+                    host['uuid'])
+                raise wsme.exc.ClientSideError(msg)
+
+        if not dev.sriov_vfs_pci_address or len(dev.sriov_vfs_pci_address) == 0:
+            sriov_vfs_pci_address = []
+        else:
+            sriov_vfs_pci_address = dev.sriov_vfs_pci_address.split(',')
+
+        if sriov_numvfs == len(sriov_vfs_pci_address):
+            if sriov_numvfs > 0:
                 LOG.info("check sriov_numvfs=%s sriov_vfs_pci_address=%s" %
                          (sriov_numvfs, dev.sriov_vfs_pci_address))
         else:
-            msg = (_("Expecting number of FPGA device sriov_numvfs=%s. "
-                     "Please wait a few minutes for inventory update and "
-                     "retry host-unlock." %
-                     sriov_numvfs))
+            msg = (_("Expecting sriov_vfs_pci_address length=%d for FEC "
+                     "device pciaddr=%s. Please wait a few minutes for "
+                     "inventory update and retry host-unlock." %
+                     (sriov_numvfs, dev.pciaddr)))
             LOG.info(msg)
             pecan.request.rpcapi.update_sriov_config(
                 pecan.request.context,
                 host['uuid'])
             raise wsme.exc.ClientSideError(msg)

-    def _semantic_check_fpga_device(self, host, dev, force_unlock=False):
+    def _semantic_check_acclr_device(self, host, dev, force_unlock=False):
         """
-        Perform semantic checks on an FPGA device.
+        Perform semantic checks on an FEC device.
         """
         if dev.pclass_id != device.PCI_DEVICE_CLASS_FPGA:
             return

-        if dev.pdevice_id == device.PCI_DEVICE_ID_FPGA_INTEL_5GNR_FEC_PF:
-            self._semantic_check_fpga_fec_device(host, dev, force_unlock)
+        if dev.pdevice_id in device.SRIOV_ENABLED_FEC_DEVICE_IDS:
+            self._semantic_check_acclr_fec_device(host, dev, force_unlock)

     def _semantic_check_devices(self, host, force_unlock=False):
         """

@@ -3401,7 +3461,7 @@ class HostController(rest.RestController):
             pecan.request.dbapi.pci_device_get_by_host(host['uuid']))
         for dev in devices:
             if dev.pclass_id == device.PCI_DEVICE_CLASS_FPGA:
-                self._semantic_check_fpga_device(host, dev, force_unlock)
+                self._semantic_check_acclr_device(host, dev, force_unlock)

     def _semantic_check_unlock_kube_upgrade(self, ihost, force_unlock=False):
         """

@@ -3447,6 +3507,29 @@ class HostController(rest.RestController):
         # Check for new hardware since upgrade-start
         self._semantic_check_upgrade_refresh(upgrade, ihost, force_unlock)

+    @staticmethod
+    def _semantic_check_duplex_oam_config(ihost):
+        system = pecan.request.dbapi.isystem_get_one()
+        if system.capabilities.get('simplex_to_duplex_migration'):
+            network = pecan.request.dbapi.network_get_by_type(constants.NETWORK_TYPE_OAM)
+            address_names = {'oam_c0_ip': '%s-%s' % (constants.CONTROLLER_0_HOSTNAME,
+                                                     constants.NETWORK_TYPE_OAM),
+                             'oam_c1_ip': '%s-%s' % (constants.CONTROLLER_1_HOSTNAME,
+                                                     constants.NETWORK_TYPE_OAM)}
+            addresses = {a['name']: a for a in
+                         pecan.request.dbapi.addresses_get_by_pool_uuid(network.pool_uuid)}
+
+            # check if the controller-0-oam and controller-1-oam entries exist
+            for key, name in address_names.items():
+                if addresses.get(name) is None:
+                    msg = _("Cannot unlock controller on a duplex without "
+                            "configuring %s." % key)
+                    raise wsme.exc.ClientSideError(msg)
+                if addresses[name].address is None:
+                    msg = _("Cannot unlock controller on a duplex without "
+                            "configuring a unit IP for %s." % key)
+                    raise wsme.exc.ClientSideError(msg)
+
     @staticmethod
     def _semantic_check_oam_interface(ihost):
         """

@@ -3576,6 +3659,8 @@ class HostController(rest.RestController):

         # Make adjustments to 2M and 1G hugepages to accommodate an
         # increase in platform reserved memory.
+        # Also consider the available memory in the calculation:
+        # hugepages shall not be decreased if there is memory available.
         for m in mems:
             # ignore updates when no change required
             if m.platform_reserved_mib is None or \

@@ -3589,45 +3674,68 @@ class HostController(rest.RestController):
                 continue

             # start with current measured hugepage
+            n_total_hp_size = 0
+            if m.vswitch_hugepages_reqd is not None:
+                n_total_hp_size += m.vswitch_hugepages_reqd \
+                    * m.vswitch_hugepages_size_mib
+            else:
+                n_total_hp_size += m.vswitch_hugepages_nr \
+                    * m.vswitch_hugepages_size_mib
             if m.vm_hugepages_nr_2M is not None:
                 n_2M = m.vm_hugepages_nr_2M
+                n_total_hp_size += n_2M * constants.MIB_2M
             else:
                 n_2M = None
             if m.vm_hugepages_nr_1G is not None:
                 n_1G = m.vm_hugepages_nr_1G
+                n_total_hp_size += n_1G * constants.MIB_1G
             else:
                 n_1G = None

-            # adjust current measurements
-            d_MiB = reserved - m.platform_reserved_mib
-            d_2M = int(d_MiB / constants.MIB_2M)
-            d_1G = int((d_MiB + 512) / constants.MIB_1G)
-            if n_2M is not None and n_2M - d_2M > 0:
-                d_1G = 0
-                n_2M -= d_2M
-            else:
-                d_2M = 0
-            if n_1G is not None and n_1G - d_1G > 0:
-                n_1G -= d_1G
-            else:
-                d_1G = 0
-
-            # override with pending values
-            if m.vm_hugepages_nr_2M_pending is not None:
-                n_2M = m.vm_hugepages_nr_2M_pending
-            if m.vm_hugepages_nr_1G_pending is not None:
-                n_1G = m.vm_hugepages_nr_1G_pending
-
+            # adjust current hugepage measurements based on the available
+            # memory
+            hp_mem_avail_mib = m.node_memtotal_mib - reserved \
+                - int(n_total_hp_size)
             values = {}
             values.update({'platform_reserved_mib': reserved})
-            if n_2M is not None:
-                values.update({'vm_hugepages_nr_2M_pending': n_2M})
-            if n_1G is not None:
-                values.update({'vm_hugepages_nr_1G_pending': n_1G})
-            LOG.info("%s auto_adjust_memory numa_node=%d, "
-                     "+2M=%d, +1G=%d, values=%s"
-                     % (ihost['hostname'], node['numa_node'],
-                        -d_2M, -d_1G, values))
+
+            # Only adjust the number of hugepages if the memory available is
+            # less than 50% of the total memory
+            if (hp_mem_avail_mib < int(0.5 * m.node_memtotal_mib)):
+                d_MiB = reserved - m.platform_reserved_mib
+                d_2M = int(d_MiB / constants.MIB_2M)
+                d_1G = int((d_MiB + 512) / constants.MIB_1G)
+                if n_2M is not None and n_2M - d_2M > 0:
+                    d_1G = 0
+                    n_2M -= d_2M
+                else:
+                    d_2M = 0
+                if n_1G is not None and n_1G - d_1G > 0:
+                    n_1G -= d_1G
+                else:
+                    d_1G = 0
+
+                # override with pending values
+                if m.vm_hugepages_nr_2M_pending is not None:
+                    n_2M = m.vm_hugepages_nr_2M_pending
+                if m.vm_hugepages_nr_1G_pending is not None:
+                    n_1G = m.vm_hugepages_nr_1G_pending
+
+                if n_2M is not None:
+                    values.update({'vm_hugepages_nr_2M_pending': n_2M})
+                if n_1G is not None:
+                    values.update({'vm_hugepages_nr_1G_pending': n_1G})
+                LOG.info("%s auto_adjust_memory numa_node=%d, "
+                         "+2M=%d, +1G=%d, values=%s"
+                         % (ihost['hostname'], node['numa_node'],
+                            -d_2M, -d_1G, values))
+            else:
+                LOG.info("%s auto_adjust_memory numa_node=%d, "
+                         "number of app hugepages preserved: "
+                         "2M=%d, 1G=%d, values=%s, "
+                         "available memory (MB): %d"
+                         % (ihost['hostname'], node['numa_node'],
+                            n_2M, n_1G, values, hp_mem_avail_mib))
             pecan.request.dbapi.imemory_update(m.uuid, values)

         return None

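To make the adjustment arithmetic concrete: raising platform reserved memory by d_MiB frees that memory by dropping whole hugepages, preferring 2M pages and falling back to 1G pages. A worked sketch of the same computation, assuming constants.MIB_2M = 2 and constants.MIB_1G = 1024 (the page sizes expressed in MiB):

MIB_2M = 2       # one 2M hugepage, in MiB (assumed constant value)
MIB_1G = 1024    # one 1G hugepage, in MiB (assumed constant value)

def adjust(n_2M, n_1G, d_MiB):
    """Drop enough hugepages to cover d_MiB of extra reserved memory."""
    d_2M = int(d_MiB / MIB_2M)            # candidate 2M pages to drop
    d_1G = int((d_MiB + 512) / MIB_1G)    # candidate 1G pages, rounded
    if n_2M is not None and n_2M - d_2M > 0:
        d_1G = 0                          # the 2M pool can absorb it all
        n_2M -= d_2M
    else:
        d_2M = 0
    if n_1G is not None and n_1G - d_1G > 0:
        n_1G -= d_1G
    else:
        d_1G = 0
    return n_2M, n_1G

# Growing reserved memory by 1024 MiB costs 512 x 2M pages:
assert adjust(n_2M=2000, n_1G=4, d_MiB=1024) == (1488, 4)
# With too few 2M pages, the same growth costs one 1G page instead:
assert adjust(n_2M=100, n_1G=4, d_MiB=1024) == (100, 3)
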
@@ -4419,6 +4527,23 @@ class HostController(rest.RestController):
         pecan.request.rpcapi.configure_ttys_dcd(
             pecan.request.context, ihost['uuid'], ttys_dcd)

+    def mtc_action_apps_semantic_checks(self, action):
+        """Extend the semantic checks in this class by letting
+        applications run their own semantic checks.
+
+        :param action: maintenance action
+        """
+        try:
+            pecan.request.rpcapi.mtc_action_apps_semantic_checks(
+                pecan.request.context, action)
+        except rpc_common.RemoteError as e:
+            raise wsme.exc.ClientSideError(_(
+                "{} action semantic check failed by app: {}"
+                "".format(action.capitalize(), str(e.value))))
+
+        return True
+
     def action_check(self, action, hostupdate):
         """Performs semantic checks related to action"""

@@ -4489,8 +4614,13 @@ class HostController(rest.RestController):
                 pecan.request.dbapi.ihost_update(hostupdate.ihost_orig['uuid'],
                                                  hostupdate.ihost_val_prenotify)
                 raise

+            if not force_unlock:
+                self.mtc_action_apps_semantic_checks(action)
+
         elif action == constants.LOCK_ACTION:
-            if self.check_lock(hostupdate):
+            if self.check_lock(hostupdate) and \
+                    self.mtc_action_apps_semantic_checks(action):
                 rc = self.update_ihost_action(action, hostupdate)
         elif action == constants.FORCE_LOCK_ACTION:
             if self.check_force_lock(hostupdate):

@@ -5042,6 +5172,9 @@ class HostController(rest.RestController):
                 cutils.is_aio_duplex_system(pecan.request.dbapi):
             return

+        if personality == constants.EDGEWORKER:
+            return
+
         if (utils.SystemHelper.get_product_build() ==
                 constants.TIS_AIO_BUILD):
             msg = _("Personality [%s] for host is not compatible "

@@ -5508,6 +5641,8 @@ class HostController(rest.RestController):
         # If HTTPS is enabled then we may be in TPM configuration mode
         if utils.get_https_enabled():
             self._semantic_check_tpm_config(hostupdate.ihost_orig)
+        if utils.get_system_mode() == constants.SYSTEM_MODE_DUPLEX:
+            self._semantic_check_duplex_oam_config(hostupdate.ihost_orig)

     def check_unlock_worker(self, hostupdate, force_unlock=False):
         """Check semantics on host-unlock of a worker."""

@@ -5833,6 +5968,18 @@ class HostController(rest.RestController):
                 _("Swact action not allowed. Upgrade state must be %s") %
                 (constants.UPGRADE_DATA_MIGRATION_COMPLETE))

+        activating_states = [constants.UPGRADE_ACTIVATION_REQUESTED,
+                             constants.UPGRADE_ACTIVATING]
+        if upgrade.state in activating_states and not force_swact:
+            # Block swacts during activation to prevent interrupting the
+            # upgrade scripts.
+            # Allow swacts during UPGRADE_ACTIVATING_HOSTS, as the active
+            # controller may need a lock/unlock if a runtime manifest fails.
+            # Allow force swacts for recovery in edge cases.
+            raise wsme.exc.ClientSideError(
+                _("Swact action not allowed. Wait until the upgrade-activate "
+                  "command completes"))
+
         if upgrade.state in [constants.UPGRADE_ABORTING,
                              constants.UPGRADE_ABORTING_ROLLBACK]:
             if to_host_load_id == upgrade.to_load:

@ -5937,22 +6084,29 @@ class HostController(rest.RestController):
|
|||
self._check_swact_device_image_update(hostupdate.ihost_orig,
|
||||
ihost_ctr, force_swact)
|
||||
|
||||
if ihost_ctr.config_target:
|
||||
if ihost_ctr.config_target != ihost_ctr.config_applied:
|
||||
try:
|
||||
upgrade = \
|
||||
pecan.request.dbapi.software_upgrade_get_one()
|
||||
except exception.NotFound:
|
||||
upgrade = None
|
||||
if upgrade and upgrade.state == \
|
||||
constants.UPGRADE_ABORTING_ROLLBACK:
|
||||
pass
|
||||
else:
|
||||
raise wsme.exc.ClientSideError(
|
||||
_("%s target Config %s not yet applied."
|
||||
" Apply target Config via Lock/Unlock prior"
|
||||
" to Swact") %
|
||||
(ihost_ctr.hostname, ihost_ctr.config_target))
|
||||
if ihost_ctr.config_target and\
|
||||
ihost_ctr.config_target != ihost_ctr.config_applied:
|
||||
try:
|
||||
upgrade = \
|
||||
pecan.request.dbapi.software_upgrade_get_one()
|
||||
except exception.NotFound:
|
||||
upgrade = None
|
||||
if upgrade and upgrade.state == \
|
||||
constants.UPGRADE_ABORTING_ROLLBACK:
|
||||
pass
|
||||
elif not utils.is_host_active_controller(ihost_ctr):
|
||||
# This condition occurs when attempting to host-swact
|
||||
# away from "active" (platform services) controller.
|
||||
#
|
||||
# Since api (sysinv, sm) allows for host-swact
|
||||
# services away from a "standby" controller, this enforcement
|
||||
# is not required for host-swact to the already
|
||||
# active controller.
|
||||
raise wsme.exc.ClientSideError(
|
||||
_("%s target Config %s not yet applied."
|
||||
" Apply target Config via Lock/Unlock prior"
|
||||
" to Swact") %
|
||||
(ihost_ctr.hostname, ihost_ctr.config_target))
|
||||
|
||||
self._semantic_check_swact_upgrade(hostupdate.ihost_orig,
|
||||
ihost_ctr,
|
||||
|
@ -5980,6 +6134,14 @@ class HostController(rest.RestController):
|
|||
raise wsme.exc.ClientSideError(
|
||||
_("%s" % response['error_details']))
|
||||
|
||||
# Check no app apply is in progress
|
||||
# Skip if it is a force swact
|
||||
if force_swact is False:
|
||||
for _app in pecan.request.dbapi.kube_app_get_all():
|
||||
if _app.status == constants.APP_APPLY_IN_PROGRESS:
|
||||
raise wsme.exc.ClientSideError(
|
||||
_("Swact action not allowed. %s apply is in progress." % _app.name))
|
||||
|
||||
def check_lock_storage(self, hostupdate, force=False):
|
||||
"""Pre lock semantic checks for storage"""
|
||||
LOG.info("%s ihost check_lock_storage" % hostupdate.displayid)
|
||||
|
|
|
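For review context, a condensed restatement of the swact gating introduced above; the helper name and state value are illustrative, not part of the change:

```python
def swact_blocked_by_pending_config(config_target, config_applied,
                                    upgrade_state, target_is_active_controller):
    """Sketch of the check above: when is a swact refused for unapplied config?"""
    if not config_target or config_target == config_applied:
        return False          # nothing pending, swact may proceed
    if upgrade_state == 'aborting-reinstall-rollback':  # assumed constant value
        return False          # rollback in progress, allow the swact
    if target_is_active_controller:
        return False          # "swact" to the already active controller is harmless
    return True               # otherwise require a lock/unlock to apply config first
```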
@@ -6,7 +6,7 @@
 import json

 from oslo_log import log
-from sysinv.api.controllers.v1.rest_api import rest_api_request
+from sysinv.common.rest_api import rest_api_request

 LOG = log.getLogger(__name__)
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2019 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #

@@ -144,6 +144,9 @@ class Interface(base.APIBase):
     txhashpolicy = wtypes.text
     "Represent the txhashpolicy of the interface"

+    primary_reselect = wtypes.text
+    "Represent the primary_reselect mode of the interface"
+
     ifcapabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                                                     six.integer_types)}
     "This interface's meta data"

@@ -193,6 +196,9 @@ class Interface(base.APIBase):
     ptp_role = wtypes.text
     "The PTP role for this interface"

+    max_tx_rate = int
+    "The value of configured max tx rate of VF, Mbps"
+
     def __init__(self, **kwargs):
         self.fields = list(objects.interface.fields.keys())
         for k in self.fields:

@@ -217,7 +223,8 @@ class Interface(base.APIBase):
                    'aemode', 'schedpolicy', 'txhashpolicy',
                    'vlan_id', 'uses', 'usesmodify', 'used_by',
                    'ipv4_mode', 'ipv6_mode', 'ipv4_pool', 'ipv6_pool',
-                   'sriov_numvfs', 'sriov_vf_driver', 'ptp_role'])
+                   'sriov_numvfs', 'sriov_vf_driver', 'ptp_role',
+                   'max_tx_rate', 'primary_reselect'])

         # never expose the ihost_id attribute
         interface.ihost_id = wtypes.Unset

@@ -402,9 +409,6 @@ class InterfaceController(rest.RestController):
         except exception.SysinvException as e:
             LOG.exception(e)
             raise wsme.exc.ClientSideError(str(e))
-        except exception.HTTPNotFound:
-            raise wsme.exc.ClientSideError(_("Interface create failed: interface %s"
-                                             % (interface['ifname'])))
         return Interface.convert_with_links(new_interface)

     @cutils.synchronized(LOCK_NAME)

@@ -672,10 +676,12 @@ def _set_defaults(interface):
     defaults = {'imtu': DEFAULT_MTU,
                 'aemode': constants.AE_MODE_ACTIVE_STANDBY,
                 'txhashpolicy': None,
+                'primary_reselect': None,
                 'vlan_id': None,
                 'sriov_numvfs': 0,
                 'sriov_vf_driver': None,
-                'ptp_role': constants.INTERFACE_PTP_ROLE_NONE}
+                'ptp_role': constants.INTERFACE_PTP_ROLE_NONE,
+                'max_tx_rate': None}

     if interface['ifclass'] == constants.INTERFACE_CLASS_DATA:
         defaults['ipv4_mode'] = constants.IPV4_DISABLED

@@ -768,26 +774,12 @@ def _check_interface_mtu(interface, ihost, from_profile=False):
     return interface


-def _get_host_mgmt_interface(ihost):
-    for iface in pecan.request.dbapi.iinterface_get_by_ihost(ihost['id']):
-        for ni in pecan.request.dbapi.interface_network_get_by_interface(iface['id']):
-            network = pecan.request.dbapi.network_get(ni.network_id)
-            if network.type == constants.NETWORK_TYPE_MGMT:
-                return iface
-    return None
-
-
 def _check_interface_sriov(interface, ihost, from_profile=False):
     sriov_update = False

     if 'ifclass' in interface.keys() and not interface['ifclass']:
         return sriov_update

-    if (interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV and
-            _get_host_mgmt_interface(ihost) is None):
-        raise wsme.exc.ClientSideError(_("Unable to provision pci-sriov interface "
-                                         "without configured mgmt interface."))
-
     if (interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV and
             'sriov_numvfs' not in interface.keys()):
         raise wsme.exc.ClientSideError(_("A network type of pci-sriov must specify "

@@ -880,6 +872,11 @@ def _check_interface_class_transition(interface, existing_interface):
     existing_ifclass = existing_interface['ifclass']
     if ifclass == existing_ifclass:
         return
+    # to share single vf capable nic, we need to allow
+    # platform to pci-sriov class transition
+    if (ifclass == constants.INTERFACE_CLASS_PCI_SRIOV and
+            existing_ifclass == constants.INTERFACE_CLASS_PLATFORM):
+        return
     if (ifclass and
             existing_interface[
                 'ifclass'] == constants.INTERFACE_CLASS_PLATFORM and

@@ -1019,6 +1016,48 @@ def _check_network_type_and_port(interface, ihost,
             raise wsme.exc.ClientSideError(msg)


+def _check_interface_ratelimit(interface):
+    # Ensure rate limit is valid for VF interfaces
+    if interface['max_tx_rate'] is not None:
+        if not str(interface['max_tx_rate']).isdigit():
+            msg = _("max_tx_rate must be an integer value.")
+            raise wsme.exc.ClientSideError(msg)
+        if interface['iftype'] != constants.INTERFACE_TYPE_VF:
+            msg = _("max_tx_rate is only allowed to be configured for VF interfaces")
+            raise wsme.exc.ClientSideError(msg)
+
+        # check if an overcommitted config
+        max_tx_rate = interface['max_tx_rate']
+
+        ihost_uuid = interface['ihost_uuid']
+        lower_ifname = interface['uses'][0]
+        lower_iface = (
+            pecan.request.dbapi.iinterface_get(lower_ifname, ihost_uuid))
+
+        ports = pecan.request.dbapi.ethernet_port_get_by_interface(
+            lower_iface['uuid'])
+        if len(ports) > 0 and ports[0]['speed'] is not None:
+            # keep 10% of the bandwidth for PF traffic
+            total_rate_for_vf = int(ports[0]['speed'] * constants.VF_TOTAL_RATE_RATIO)
+            total_rate_used = 0
+            this_interface_id = interface.get('id', 0)
+            interface_list = pecan.request.dbapi.iinterface_get_all(
+                forihostid=ihost_uuid)
+            for i in interface_list:
+                if (i['iftype'] == constants.INTERFACE_TYPE_VF and
+                        lower_ifname == i['uses'][0] and
+                        i.id != this_interface_id):
+                    if i['max_tx_rate'] is not None:
+                        total_rate_used += i['max_tx_rate'] * i['sriov_numvfs']
+
+            vfs_config = interface['sriov_numvfs']
+            if total_rate_used + (max_tx_rate * vfs_config) > total_rate_for_vf:
+                msg = _("Configured (max_tx_rate*sriov_numvfs) exceeds "
+                        "available link speed bandwidth: %d Mbps." %
+                        (total_rate_for_vf - total_rate_used))
+                raise wsme.exc.ClientSideError(msg)
+
+
 def _check_interface_ptp(interface):
     # Ensure PTP settings are valid for this interface
     # Validate PTP role value
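A rough sketch of the arithmetic this overcommit check performs; all numbers below are assumptions for illustration, not values from this change:

```python
# Illustrative only: the real speed comes from the ethernet port inventory.
PORT_SPEED_MBPS = 10000          # a 10G lower port
VF_TOTAL_RATE_RATIO = 0.9        # keep 10% of bandwidth for PF traffic

total_rate_for_vf = int(PORT_SPEED_MBPS * VF_TOTAL_RATE_RATIO)   # 9000 Mbps budget

# one existing VF interface on the same lower port: 4 VFs at 1000 Mbps each
total_rate_used = 1000 * 4                                       # 4000 Mbps

# proposed interface: 6 VFs at 1000 Mbps each
max_tx_rate, sriov_numvfs = 1000, 6
requested = max_tx_rate * sriov_numvfs                           # 6000 Mbps

# 4000 + 6000 > 9000, so the request is rejected; the error reports the
# remaining headroom (9000 - 4000 = 5000 Mbps)
assert total_rate_used + requested > total_rate_for_vf
```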
@@ -1046,6 +1085,30 @@ def _check_interface_ptp(interface):
             raise wsme.exc.ClientSideError(msg)


+def _check_ae_primary_reselect(interface):
+    ifclass = interface['ifclass']
+    iftype = interface['iftype']
+    primary_reselect = interface['primary_reselect']
+    aemode = interface['aemode']
+    if primary_reselect is not None:
+        if iftype != constants.INTERFACE_TYPE_AE:
+            msg = _("The option primary_reselect is only applicable to bonded interface. ")
+            raise wsme.exc.ClientSideError(msg)
+        if aemode != constants.AE_MODE_ACTIVE_STANDBY and primary_reselect is not None:
+            msg = _("Device interface with interface type 'aggregated ethernet' "
+                    "in '%s' mode should not specify primary_reselect option." % aemode)
+            raise wsme.exc.ClientSideError(msg)
+        if (ifclass != constants.INTERFACE_CLASS_PLATFORM and
+                primary_reselect != constants.PRIMARY_RESELECT_ALWAYS):
+            msg = _("The option primary_reselect must be 'always' for non-platform interfaces. ")
+            raise wsme.exc.ClientSideError(msg)
+        if primary_reselect not in constants.VALID_PRIMARY_RESELECT_LIST:
+            msg = _("Invalid bonding primary reselect option: '{}'. "
+                    "Valid options must be one of {}".format(
+                        primary_reselect,
+                        ', '.join(constants.VALID_PRIMARY_RESELECT_LIST)))
+            raise wsme.exc.ClientSideError(msg)
+
+
 def _check_interface_data(op, interface, ihost, existing_interface,
                           datanetworks=None):
     # Get data
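The acceptance matrix implied by this validator, sketched with assumed constant values (the real ones live in sysinv constants):

```python
# Assumed values for illustration.
AE_MODE_ACTIVE_STANDBY = 'active_standby'
VALID_PRIMARY_RESELECT_LIST = ['always', 'better', 'failure']

cases = [
    # (ifclass, iftype, aemode, primary_reselect)
    ('platform', 'ae', AE_MODE_ACTIVE_STANDBY, 'better'),  # accepted
    ('platform', 'ae', 'balanced', 'always'),              # rejected: bond not in active/standby mode
    ('data', 'ae', AE_MODE_ACTIVE_STANDBY, 'better'),      # rejected: non-platform must use 'always'
    ('platform', 'ethernet', None, 'always'),              # rejected: not a bonded interface
]
```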
@@ -1107,12 +1170,14 @@ def _check_interface_data(op, interface, ihost, existing_interface,
             parent = pecan.request.dbapi.iinterface_get(p, ihost_uuid)
             if (parent.uuid in interface['uses'] or
                     parent.ifname in interface['uses']):
+                supported_type = [constants.INTERFACE_TYPE_VLAN,
+                                  constants.INTERFACE_TYPE_VF]
                 if i.iftype == constants.INTERFACE_TYPE_AE:
                     msg = _("Interface '{}' is already used by another"
                             " AE interface '{}'".format(p, i.ifname))
                     raise wsme.exc.ClientSideError(msg)
                 elif (i.iftype == constants.INTERFACE_TYPE_VLAN and
-                      iftype != constants.INTERFACE_TYPE_VLAN):
+                      iftype not in supported_type):
                     msg = _("Interface '{}' is already used by another"
                             " VLAN interface '{}'".format(p, i.ifname))
                     raise wsme.exc.ClientSideError(msg)

@@ -1184,6 +1249,8 @@ def _check_interface_data(op, interface, ihost, existing_interface,
                     "in '%s' mode should not specify a Tx Hash Policy." % aemode)
             raise wsme.exc.ClientSideError(msg)

+        _check_ae_primary_reselect(interface)
+
     # Make sure interface type is valid
     supported_type = [constants.INTERFACE_TYPE_AE,
                       constants.INTERFACE_TYPE_VLAN,

@@ -1255,7 +1322,8 @@ def _check_interface_data(op, interface, ihost, existing_interface,

     # check MTU
     if interface['iftype'] in [constants.INTERFACE_TYPE_VLAN,
-                               constants.INTERFACE_TYPE_VF]:
+                               constants.INTERFACE_TYPE_VF,
+                               constants.INTERFACE_TYPE_ETHERNET]:
         interface_mtu = interface['imtu']
         for name in interface['uses']:
             parent = pecan.request.dbapi.iinterface_get(name, ihost_uuid)

@@ -1263,7 +1331,7 @@ def _check_interface_data(op, interface, ihost, existing_interface,
             msg = _("Interface MTU (%s) cannot be larger than MTU of "
                     "underlying interface (%s)" % (interface_mtu, parent['imtu']))
             raise wsme.exc.ClientSideError(msg)
-    elif interface['used_by']:
+    if interface['used_by']:
         mtus = _get_interface_mtus(ihost_uuid, interface)
         for mtu in mtus:
             if int(interface['imtu']) < int(mtu):

@@ -1297,7 +1365,8 @@ def _check_interface_data(op, interface, ihost, existing_interface,
         for i in lower_iface['used_by']:
             if i != interface['ifname']:
                 iface = pecan.request.dbapi.iinterface_get(i, ihost_uuid)
-                avail_vfs -= iface.get('sriov_numvfs', 0)
+                if iface.get('sriov_numvfs', 0):
+                    avail_vfs -= iface.get('sriov_numvfs')
         if interface['sriov_numvfs'] > avail_vfs:
             msg = _("The number of virtual functions (%s) must be less "
                     "than or equal to the available VFs (%s) available "

@@ -1305,6 +1374,7 @@ def _check_interface_data(op, interface, ihost, existing_interface,
                     (interface['sriov_numvfs'], avail_vfs, lower_iface['ifname']))
             raise wsme.exc.ClientSideError(msg)
     _check_interface_ptp(interface)
+    _check_interface_ratelimit(interface)

     return interface

@@ -1759,6 +1829,18 @@ def _create(interface, from_profile=False):
         pecan.request.dbapi.iinterface_destroy(new_interface['uuid'])
         raise e

+    if (cutils.is_aio_simplex_system(pecan.request.dbapi)
+            and new_interface['iftype'] == constants.INTERFACE_TYPE_VF):
+        try:
+            pecan.request.rpcapi.update_sriov_vf_config(
+                pecan.request.context,
+                ihost['uuid'])
+        except Exception as e:
+            LOG.exception(e)
+            msg = _("Interface pci-sriov-vf creation failed: host %s if %s"
+                    % (ihost['hostname'], interface['ifname']))
+            raise wsme.exc.ClientSideError(msg)
+
     return new_interface


@@ -1766,7 +1848,21 @@ def _check(op, interface, ports=None, ifaces=None, from_profile=False,
           existing_interface=None, datanetworks=None):
     # Semantic checks
     ihost = pecan.request.dbapi.ihost_get(interface['ihost_uuid']).as_dict()
-    _check_host(ihost)
+
+    check_host = True
+    if (cutils.is_aio_simplex_system(pecan.request.dbapi)
+            and interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV):
+        if (op == 'modify' and interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET
+                and existing_interface['ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV
+                and existing_interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET):
+            # user can modify interface to SR-IOV PF without host lock in AIO-SX
+            check_host = False
+        elif (op == 'add' and interface['iftype'] == constants.INTERFACE_TYPE_VF):
+            # user can add interface SR-IOV VF without host lock in AIO-SX
+            check_host = False
+
+    if check_host:
+        _check_host(ihost)
     if not from_profile:
         if ports:
             _check_ports(op, interface, ihost, ports)
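A condensed restatement of the new lock-bypass decision, as a sketch (the helper name and string values are illustrative, not the shipped code):

```python
def needs_host_lock(op, ifclass, iftype, existing=None, aio_simplex=True):
    """When does an interface operation still require the host to be locked?"""
    if not (aio_simplex and ifclass == 'pci-sriov'):
        return True    # only AIO-SX pci-sriov operations are relaxed
    if (op == 'modify' and iftype == 'ethernet' and existing
            and existing['ifclass'] != 'pci-sriov'
            and existing['iftype'] == 'ethernet'):
        return False   # converting an ethernet interface into an SR-IOV PF
    if op == 'add' and iftype == 'vf':
        return False   # adding an SR-IOV VF interface
    return True
```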
@@ -1777,6 +1873,22 @@ def _check(op, interface, ports=None, ifaces=None, from_profile=False,
                 # Can only have one interface associated to vlan interface type
                 raise wsme.exc.ClientSideError(
                     _("Can only have one interface for vlan type. (%s)" % ifaces))
+            if interface['iftype'] == constants.INTERFACE_TYPE_ETHERNET:
+                if len(ifaces) > 1:
+                    raise wsme.exc.ClientSideError(
+                        _("Can only have one lower interface for ethernet type."
+                          "(%s)" % ifaces))
+                lower = pecan.request.dbapi.iinterface_get(ifaces[0],
+                                                           interface['ihost_uuid'])
+                if not (lower['iftype'] == constants.INTERFACE_TYPE_ETHERNET
+                        and lower['ifclass'] ==
+                        constants.INTERFACE_CLASS_PCI_SRIOV):
+                    # Can only have pci_sriov ethernet type lower interface
+                    # associated to ethernet interface type
+                    raise wsme.exc.ClientSideError(
+                        _("Can only use pci-sriov ethernet interface for "
+                          "ethernet type. (%s)" % ifaces))
+
         for i in ifaces:
             for iface in interfaces:
                 if iface['uuid'] == i or iface['ifname'] == i:

@@ -1795,6 +1907,9 @@ def _check(op, interface, ports=None, ifaces=None, from_profile=False,
             if 'txhashpolicy' not in iface:
                 iface['txhashpolicy'] = None

+            if 'primary_reselect' not in iface:
+                iface['primary_reselect'] = None
+
             _check_interface_data(
                 "modify", iface, ihost, existing_iface, datanetworks)

@@ -1856,11 +1971,18 @@ def _delete(interface, from_profile=False):
     ihost = pecan.request.dbapi.ihost_get(interface['forihostid']).as_dict()

     if not from_profile:
         # Semantic checks
-        _check_host(ihost)
+        check_host = True
+        if (cutils.is_aio_simplex_system(pecan.request.dbapi)
+                and interface['ifclass'] == constants.INTERFACE_CLASS_PCI_SRIOV
+                and interface['iftype'] == constants.INTERFACE_TYPE_VF):
+            # user can delete interface SR-IOV VF without host lock in AIO-SX
+            check_host = False

-    if not from_profile and interface['iftype'] == 'ethernet':
-        msg = _("Cannot delete an ethernet interface type.")
+        if check_host:
+            _check_host(ihost)
+
+    if not from_profile and interface['iftype'] == 'ethernet' and not interface['uses']:
+        msg = _("Cannot delete a system created ethernet interface")
         raise wsme.exc.ClientSideError(msg)

     # Allow the removal of the virtual management interface during bootstrap.
@@ -141,7 +141,7 @@ class InterfaceDataNetworkController(rest.RestController):
         interface_datanetwork_dict['datanetwork_id'] = datanetwork_id

         interface_obj = pecan.request.dbapi.iinterface_get(interface_uuid)
-        self._check_host(interface_obj.ihost_uuid)
+        self._check_host(interface_obj)

         self._check_interface_class(interface_obj)
         self._check_interface_mtu(interface_obj, datanetwork_obj)

@@ -152,6 +152,10 @@ class InterfaceDataNetworkController(rest.RestController):
         result = pecan.request.dbapi.interface_datanetwork_create(
             interface_datanetwork_dict)

+        if interface_obj.ifclass == constants.INTERFACE_CLASS_PCI_SRIOV:
+            pecan.request.rpcapi.update_pcidp_config(
+                pecan.request.context, interface_obj.ihost_uuid)
+
         return InterfaceDataNetwork.convert_with_links(result)

     def _get_interface_datanetwork_collection(

@@ -211,8 +215,14 @@ class InterfaceDataNetworkController(rest.RestController):
         raise wsme.exc.ClientSideError(msg)

     @staticmethod
-    def _check_host(host_uuid):
-        host = pecan.request.dbapi.ihost_get(host_uuid)
+    def _check_host(interface_obj):
+        # In general, we don't want to support changing the interface configuration
+        # at runtime, allowing only this specific combination, because it can have an
+        # impact on the host availability and services
+        if (cutils.is_aio_simplex_system(pecan.request.dbapi)
+                and interface_obj.ifclass == constants.INTERFACE_CLASS_PCI_SRIOV):
+            return
+        host = pecan.request.dbapi.ihost_get(interface_obj.ihost_uuid)
         if host.administrative != constants.ADMIN_LOCKED:
             msg = _("Operation Rejected: Host '%s' is administrative '%s' " %
                     (host.hostname, host.administrative))

@@ -310,6 +320,9 @@ class InterfaceDataNetworkController(rest.RestController):
             interface_datanetwork_uuid)
         interface_obj = pecan.request.dbapi.iinterface_get(
             ifdn_obj.interface_uuid)
-        self._check_host(interface_obj.ihost_uuid)
+        self._check_host(interface_obj)
         pecan.request.dbapi.interface_datanetwork_destroy(
             interface_datanetwork_uuid)
+        if interface_obj.ifclass == constants.INTERFACE_CLASS_PCI_SRIOV:
+            pecan.request.rpcapi.update_pcidp_config(
+                pecan.request.context, interface_obj.ihost_uuid)
@@ -193,6 +193,8 @@ class InterfaceNetworkController(rest.RestController):
                 _update_host_mgmt_mac(host, ethernet_port_mac)
                 cutils.perform_distributed_cloud_config(pecan.request.dbapi,
                                                         interface_id)
+        elif network_type == constants.NETWORK_TYPE_OAM:
+            pecan.request.rpcapi.initialize_oam_config(pecan.request.context, host)

         return InterfaceNetwork.convert_with_links(result)

@@ -237,6 +239,8 @@ class InterfaceNetworkController(rest.RestController):
             return
         elif interface.ifclass == constants.INTERFACE_CLASS_PLATFORM:
             return
+        elif interface.ifclass == constants.INTERFACE_CLASS_PCI_SRIOV:
+            return
         else:
             msg = _("An interface with interface class '%s' "
                     "cannot assign platform networks." % interface.ifclass)
@@ -1,19 +1,20 @@
 #
-# Copyright (c) 2018-2020 Wind River Systems, Inc.
+# Copyright (c) 2018-2021 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #

+import base64
 import os
+import hashlib
 import pecan
+import pwd
 from pecan import rest
-import shutil
-import tempfile
+import time
 import wsme
 from wsme import types as wtypes
 import wsmeext.pecan as wsme_pecan

-from contextlib import contextmanager
 from oslo_log import log
 from sysinv._i18n import _
 from sysinv import objects

@@ -25,24 +26,14 @@ from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common import utils as cutils
 from sysinv.common import kubernetes
+
+from sysinv.helm.lifecycle_constants import LifecycleConstants
+from sysinv.helm.lifecycle_hook import LifecycleHookInfo
+from sysinv.openstack.common.rpc import common as rpc_common
+import cgcs_patch.constants as patch_constants

 LOG = log.getLogger(__name__)


-@contextmanager
-def TempDirectory():
-    tmpdir = tempfile.mkdtemp()
-    saved_umask = os.umask(0o077)
-    try:
-        yield tmpdir
-    finally:
-        LOG.debug("Cleaning up temp directory %s" % tmpdir)
-        os.umask(saved_umask)
-        shutil.rmtree(tmpdir)
-
-
 class KubeApp(base.APIBase):
     """API representation of a containerized application."""

@@ -127,6 +118,12 @@ class KubeAppController(rest.RestController):
     def __init__(self, parent=None, **kwargs):
         self._parent = parent

+    @staticmethod
+    def _make_db_placeholder(prefix, url):
+        url_hash = hashlib.sha256()
+        url_hash.update(bytes(str(url).encode('utf-8')))
+        return "{}-{}".format(prefix, url_hash.hexdigest()[:16])
+
     def _check_tarfile(self, app_tarfile, app_name, app_version, operation):
         def _handle_upload_failure(reason):
             raise wsme.exc.ClientSideError(_(
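For context, the placeholder helper just combines a fixed prefix with a truncated SHA-256 of the tarball URL, so concurrent uploads without a name get distinct, deterministic DB keys. A minimal standalone sketch (prefix and URL are hypothetical):

```python
import hashlib

def make_db_placeholder(prefix, url):
    # prefix plus the first 16 hex chars of sha256(url)
    digest = hashlib.sha256(str(url).encode('utf-8')).hexdigest()[:16]
    return "{}-{}".format(prefix, digest)

# Same URL always yields the same placeholder; different URLs never collide
# on the prefix alone.
p1 = make_db_placeholder('missing-name', 'http://example.com/apps/app-a.tgz')
p2 = make_db_placeholder('missing-name', 'http://example.com/apps/app-b.tgz')
assert p1 != p2
```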
@@ -139,9 +136,11 @@ class KubeAppController(rest.RestController):
             # take some time depending on network traffic, target server and file
             # size.
             if not app_name:
-                app_name = constants.APP_NAME_PLACEHOLDER
+                app_name = self._make_db_placeholder(
+                    constants.APP_NAME_PLACEHOLDER, app_tarfile)
             if not app_version:
-                app_version = constants.APP_VERSION_PLACEHOLDER
+                app_version = self._make_db_placeholder(
+                    constants.APP_VERSION_PLACEHOLDER, app_tarfile)
             mname = constants.APP_MANIFEST_NAME_PLACEHOLDER
             mfile = constants.APP_TARFILE_NAME_PLACEHOLDER
             return app_name, app_version, mname, mfile

@@ -155,7 +154,7 @@ class KubeAppController(rest.RestController):
                     "{} has unrecognizable tar file extension. Supported "
                     "extensions are: .tgz and .tar.gz.".format(app_tarfile))

-            with TempDirectory() as app_path:
+            with cutils.TempDirectory() as app_path:
                 if not cutils.extract_tarfile(app_path, app_tarfile):
                     _handle_upload_failure(
                         "failed to extract tar file {}.".format(os.path.basename(app_tarfile)))

@@ -194,20 +193,41 @@ class KubeAppController(rest.RestController):
         """Retrieve a single application."""
         return self._get_one(app_name)

-    def _app_lifecycle_actions(self, db_app, operation, relative_timing):
+    def _app_lifecycle_actions(self, db_app, hook_info):
         """Perform lifecycle actions for application
         """
         pecan.request.rpcapi.app_lifecycle_actions(
-            pecan.request.context, db_app, operation, relative_timing)
+            pecan.request.context, db_app, hook_info)

     @cutils.synchronized(LOCK_NAME)
     @wsme_pecan.wsexpose(KubeApp, body=types.apidict)
     def post(self, body):
         """Uploading an application to be deployed by Armada"""
-        tarfile = body.get('tarfile')
+        tarfile_path = body.get('tarfile')
+        tarfile_binary = body.get('binary_data', '')
         name = body.get('name', '')
         version = body.get('app_version', '')
-        name, version, mname, mfile = self._check_tarfile(tarfile, name, version,
+        images = body.get('images', False)
+
+        if not cutils.is_url(tarfile_path) and not os.path.exists(tarfile_path):
+            path_tarballs = '/tmp/tarball_uploads'
+            if not os.path.exists(path_tarballs):
+                os.makedirs(path_tarballs)
+                uid, gid = pwd.getpwnam('sysinv').pw_uid, pwd.getpwnam('sysinv').pw_gid
+                os.chown(path_tarballs, uid, gid)
+
+            # Keep unique tarball name to avoid conflicts
+            tarball_name = '{}-{}'.format(time.time(), os.path.basename(tarfile_path))
+            tarfile_path = os.path.join(path_tarballs, tarball_name)
+            try:
+                with open(tarfile_path, 'wb') as f:
+                    f.write(base64.urlsafe_b64decode(tarfile_binary))
+            except Exception as e:
+                LOG.exception('Error: writing the tarfile: {}'.format(e))
+                raise wsme.exc.ClientSideError(_(
+                    "Could not save the application on path {}".format(tarfile_path)))
+
+        name, version, mname, mfile = self._check_tarfile(tarfile_path, name, version,
                                                           constants.APP_UPLOAD_OP)

        try:
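A hypothetical client-side sketch of what the new upload path expects: when `tarfile` is neither a URL nor a path that exists on the server, the tarball bytes travel base64-encoded in `binary_data`. Field names mirror the handler above; the file name and app metadata are made up:

```python
import base64

with open('stx-openstack.tgz', 'rb') as f:   # a local tarball on the client
    body = {
        'tarfile': 'stx-openstack.tgz',      # not a URL, not a server-side path
        'binary_data': base64.urlsafe_b64encode(f.read()).decode('ascii'),
        'name': 'stx-openstack',
        'app_version': '1.0-0',
        'images': False,
    }
# POSTing this body lets the controller persist the bytes under
# /tmp/tarball_uploads/<timestamp>-stx-openstack.tgz before validation.
```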
@@ -231,13 +251,18 @@ class KubeAppController(rest.RestController):
             LOG.exception(e)
             raise

+        lifecycle_hook_info = LifecycleHookInfo()
+        lifecycle_hook_info.mode = constants.APP_LIFECYCLE_MODE_MANUAL
+
         pecan.request.rpcapi.perform_app_upload(pecan.request.context,
-                                                new_app, tarfile)
+                                                new_app, tarfile_path,
+                                                lifecycle_hook_info=lifecycle_hook_info,
+                                                images=images)
         return KubeApp.convert_with_links(new_app)

     @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(KubeApp, wtypes.text, wtypes.text, wtypes.text)
-    def patch(self, name, directive, values):
+    @wsme_pecan.wsexpose(KubeApp, wtypes.text, wtypes.text, wtypes.text, wtypes.text)
+    def patch(self, name, directive, values, force=None):
         """Install/update the specified application

         :param name: application name

@@ -279,14 +304,6 @@ class KubeAppController(rest.RestController):
                 raise wsme.exc.ClientSideError(_(
                     "Application-apply rejected: " + str(e)))

-            try:
-                self._app_lifecycle_actions(db_app,
-                                            constants.APP_APPLY_OP,
-                                            constants.APP_LIFECYCLE_PRE)
-            except Exception as e:
-                raise wsme.exc.ClientSideError(_(
-                    "Application-apply rejected: " + str(e.message)))
-
             if db_app.status == constants.APP_APPLY_IN_PROGRESS:
                 raise wsme.exc.ClientSideError(_(
                     "Application-apply rejected: install/update is already "

@@ -297,13 +314,30 @@ class KubeAppController(rest.RestController):
                 raise wsme.exc.ClientSideError(_(
                     "Application-apply rejected: operation is not allowed "
                     "while the current status is {}.".format(db_app.status)))
+
+            try:
+                lifecycle_hook_info = LifecycleHookInfo()
+                lifecycle_hook_info.init(constants.APP_LIFECYCLE_MODE_MANUAL,
+                                         constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK,
+                                         constants.APP_LIFECYCLE_TIMING_PRE,
+                                         constants.APP_APPLY_OP)
+                self._app_lifecycle_actions(db_app,
+                                            lifecycle_hook_info)
+            except Exception as e:
+                raise wsme.exc.ClientSideError(_(
+                    "Application-apply rejected: " + str(e.message)))
+
             db_app.status = constants.APP_APPLY_IN_PROGRESS
             db_app.progress = None
             db_app.recovery_attempts = 0
             db_app.mode = mode
             db_app.save()
-            pecan.request.rpcapi.perform_app_apply(pecan.request.context,
-                                                   db_app, mode=mode)
+
+            lifecycle_hook_info = LifecycleHookInfo()
+            lifecycle_hook_info.mode = constants.APP_LIFECYCLE_MODE_MANUAL
+
+            pecan.request.rpcapi.perform_app_apply(pecan.request.context, db_app,
+                                                   mode=mode, lifecycle_hook_info=lifecycle_hook_info)
         elif directive == 'remove':
             if db_app.status not in [constants.APP_APPLY_SUCCESS,
                                      constants.APP_APPLY_FAILURE,
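The same two-step pattern repeats for apply, remove, abort, update, and delete below, so a condensed method-style sketch may help review; `run_directive` is hypothetical and only restates the pattern in this diff:

```python
def run_directive(self, db_app, operation, rpc_call, **rpc_kwargs):
    # Step 1: pre-operation semantic-check hook; a raise rejects the directive.
    hook = LifecycleHookInfo()
    hook.init(constants.APP_LIFECYCLE_MODE_MANUAL,
              constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK,
              constants.APP_LIFECYCLE_TIMING_PRE,
              operation)
    self._app_lifecycle_actions(db_app, hook)

    # Step 2: dispatch the RPC with a fresh, mode-only hook object.
    hook = LifecycleHookInfo()
    hook.mode = constants.APP_LIFECYCLE_MODE_MANUAL
    rpc_call(pecan.request.context, db_app,
             lifecycle_hook_info=hook, **rpc_kwargs)
```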
@@ -311,11 +345,38 @@ class KubeAppController(rest.RestController):
                 raise wsme.exc.ClientSideError(_(
                     "Application-remove rejected: operation is not allowed while "
                     "the current status is {}.".format(db_app.status)))
+
+            try:
+                lifecycle_hook_info = LifecycleHookInfo()
+                lifecycle_hook_info.init(constants.APP_LIFECYCLE_MODE_MANUAL,
+                                         constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK,
+                                         constants.APP_LIFECYCLE_TIMING_PRE,
+                                         constants.APP_REMOVE_OP)
+                # Converting string to boolean
+                if force == 'True':
+                    force = True
+                else:
+                    force = False
+
+                lifecycle_hook_info.extra = {constants.APP_LIFECYCLE_FORCE_OPERATION: force}
+                self._app_lifecycle_actions(db_app,
+                                            lifecycle_hook_info)
+            except rpc_common.RemoteError as e:
+                raise wsme.exc.ClientSideError(_(
+                    "Application-remove rejected: " + str(e.value)))
+            except Exception as e:
+                raise wsme.exc.ClientSideError(_(
+                    "Application-remove rejected: " + str(e.message)))
+
             db_app.status = constants.APP_REMOVE_IN_PROGRESS
             db_app.progress = None
             db_app.save()
+
+            lifecycle_hook_info = LifecycleHookInfo()
+            lifecycle_hook_info.mode = constants.APP_LIFECYCLE_MODE_MANUAL
+
             pecan.request.rpcapi.perform_app_remove(pecan.request.context,
-                                                    db_app)
+                                                    db_app, lifecycle_hook_info=lifecycle_hook_info)
         else:
             if db_app.status not in [constants.APP_APPLY_IN_PROGRESS,
                                      constants.APP_UPDATE_IN_PROGRESS,

@@ -323,8 +384,24 @@ class KubeAppController(rest.RestController):
                 raise wsme.exc.ClientSideError(_(
                     "Application-abort rejected: operation is not allowed while "
                     "the current status is {}.".format(db_app.status)))
+
+            try:
+                lifecycle_hook_info = LifecycleHookInfo()
+                lifecycle_hook_info.init(constants.APP_LIFECYCLE_MODE_MANUAL,
+                                         constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK,
+                                         constants.APP_LIFECYCLE_TIMING_PRE,
+                                         constants.APP_ABORT_OP)
+                self._app_lifecycle_actions(db_app,
+                                            lifecycle_hook_info)
+            except Exception as e:
+                raise wsme.exc.ClientSideError(_(
+                    "Application-abort rejected: " + str(e.message)))
+
+            lifecycle_hook_info = LifecycleHookInfo()
+            lifecycle_hook_info.mode = constants.APP_LIFECYCLE_MODE_MANUAL
+
             pecan.request.rpcapi.perform_app_abort(pecan.request.context,
-                                                   db_app)
+                                                   db_app, lifecycle_hook_info=lifecycle_hook_info)
         return KubeApp.convert_with_links(db_app)

     @cutils.synchronized(LOCK_NAME)

@@ -358,6 +435,19 @@ class KubeAppController(rest.RestController):
             raise wsme.exc.ClientSideError(_(
                 "Application-update rejected: application not found."))

+        try:
+            lifecycle_hook_info = LifecycleHookInfo()
+            lifecycle_hook_info.init(constants.APP_LIFECYCLE_MODE_MANUAL,
+                                     constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK,
+                                     constants.APP_LIFECYCLE_TIMING_PRE,
+                                     constants.APP_UPDATE_OP)
+            lifecycle_hook_info[LifecycleConstants.EXTRA][LifecycleConstants.FROM_APP] = True
+            self._app_lifecycle_actions(applied_app,
+                                        lifecycle_hook_info)
+        except Exception as e:
+            raise wsme.exc.ClientSideError(_(
+                "Application-update rejected: " + str(e.message)))
+
         if applied_app.status == constants.APP_UPDATE_IN_PROGRESS:
             raise wsme.exc.ClientSideError(_(
                 "Application-update rejected: update is already "

@@ -417,15 +507,19 @@ class KubeAppController(rest.RestController):
                 "Application-update failed: Unable to start application update, "
                 "application info update failed."))

+        lifecycle_hook_info = LifecycleHookInfo()
+        lifecycle_hook_info.mode = constants.APP_LIFECYCLE_MODE_MANUAL
+
         pecan.request.rpcapi.perform_app_update(pecan.request.context,
                                                 applied_app, target_app,
-                                                tarfile, operation, reuse_overrides)
+                                                tarfile, operation,
+                                                lifecycle_hook_info, reuse_overrides)

         return KubeApp.convert_with_links(target_app)

     @cutils.synchronized(LOCK_NAME)
-    @wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
-    def delete(self, name):
+    @wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
+    def delete(self, name, force=None):
         """Delete the application with the given name

         :param name: application name

@@ -444,8 +538,33 @@ class KubeAppController(rest.RestController):
                 "Application-delete rejected: operation is not allowed "
                 "while the current status is {}.".format(db_app.status)))

+        try:
+            lifecycle_hook_info = LifecycleHookInfo()
+            lifecycle_hook_info.init(constants.APP_LIFECYCLE_MODE_MANUAL,
+                                     constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK,
+                                     constants.APP_LIFECYCLE_TIMING_PRE,
+                                     constants.APP_DELETE_OP)
+            # Converting string to boolean
+            if force == 'True':
+                force = True
+            else:
+                force = False
+
+            lifecycle_hook_info.extra = {constants.APP_LIFECYCLE_FORCE_OPERATION: force}
+            self._app_lifecycle_actions(db_app,
+                                        lifecycle_hook_info)
+        except rpc_common.RemoteError as e:
+            raise wsme.exc.ClientSideError(_(
+                "Application-delete rejected: " + str(e.value)))
+        except Exception as e:
+            raise wsme.exc.ClientSideError(_(
+                "Application-delete rejected: " + str(e.message)))
+
+        lifecycle_hook_info = LifecycleHookInfo()
+        lifecycle_hook_info.mode = constants.APP_LIFECYCLE_MODE_MANUAL
+
         response = pecan.request.rpcapi.perform_app_delete(
-            pecan.request.context, db_app)
+            pecan.request.context, db_app, lifecycle_hook_info=lifecycle_hook_info)
         if response:
             raise wsme.exc.ClientSideError(_(
                 "%s." % response))

@@ -478,21 +597,38 @@ class KubeAppHelper(object):
             raise exception.SysinvException(_(
                 "Patching operation is in progress."))

-    def _check_patch_is_applied(self, patches):
+    def _check_required_patches_are_applied(self, patches=None):
+        """Validates that each patch provided is applied on the system"""
+        if patches is None:
+            patches = []
         try:
             system = self._dbapi.isystem_get_one()
-            response = patch_api.patch_is_applied(
+            response = patch_api.patch_query(
                 token=None,
                 timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,
-                region_name=system.region_name,
-                patches=patches
+                region_name=system.region_name
             )
         except Exception as e:
             LOG.error(e)
             raise exception.SysinvException(_(
                 "Error while querying patch-controller for the "
                 "state of the patch(es)."))
-        return response
+        query_patches = response['pd']
+        applied_patches = []
+        for patch_key in query_patches:
+            patch = query_patches[patch_key]
+            patchstate = patch.get('patchstate', None)
+            if patchstate == patch_constants.APPLIED or \
+                    patchstate == patch_constants.COMMITTED:
+                applied_patches.append(patch_key)
+
+        missing_patches = []
+        for required_patch in patches:
+            if required_patch not in applied_patches:
+                missing_patches.append(required_patch)
+
+        success = not missing_patches
+        return success, missing_patches

     def _patch_report_app_dependencies(self, name, patches=None):
         if patches is None:
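A standalone sketch of the new missing-patch computation; the response shape follows the handler above, but the patch IDs and state strings are made up for illustration:

```python
# Assumed state names; the real ones come from cgcs_patch constants.
APPLIED, COMMITTED = 'Applied', 'Committed'

response = {'pd': {
    'PATCH_0001': {'patchstate': APPLIED},
    'PATCH_0002': {'patchstate': 'Available'},
    'PATCH_0003': {'patchstate': COMMITTED},
}}

required = ['PATCH_0001', 'PATCH_0002']
applied = [key for key, patch in response['pd'].items()
           if patch.get('patchstate') in (APPLIED, COMMITTED)]
missing = [p for p in required if p not in applied]

success = not missing
print(success, missing)   # False ['PATCH_0002'] -> the upload is rejected,
                          # and the error now names the missing patch(es)
```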
@@ -552,10 +688,12 @@ class KubeAppHelper(object):
             raise exception.SysinvException(_(
                 "Application-upload rejected: manifest file is missing."))

-    def _verify_metadata_file(self, app_path, app_name, app_version):
+    def _verify_metadata_file(self, app_path, app_name, app_version,
+                              upgrade_from_release=None):
         try:
             name, version, patches = cutils.find_metadata_file(
-                app_path, constants.APP_METADATA_FILE)
+                app_path, constants.APP_METADATA_FILE,
+                upgrade_from_release=upgrade_from_release)
         except exception.SysinvException as e:
             raise exception.SysinvException(_(
                 "metadata validation failed. {}".format(e)))

@@ -566,8 +704,8 @@ class KubeAppHelper(object):
             version = app_version

         if (not name or not version or
-                name == constants.APP_VERSION_PLACEHOLDER or
-                version == constants.APP_VERSION_PLACEHOLDER):
+                name.startswith(constants.APP_VERSION_PLACEHOLDER) or
+                version.startswith(constants.APP_VERSION_PLACEHOLDER)):
             raise exception.SysinvException(_(
                 "application name or/and version is/are not included "
                 "in the tar file. Please specify the application name "

@@ -585,16 +723,19 @@ class KubeAppHelper(object):
                     "{}. Communication Error with patching subsystem. "
                     "Preventing application upload.".format(e)))

-            applied = self._check_patch_is_applied(patches)
+            applied, missing_patches = \
+                self._check_required_patches_are_applied(patches)
             if not applied:
                 raise exception.SysinvException(_(
-                    "the required patch(es) for application {} ({}) "
-                    "must be applied".format(name, version)))
+                    "the required patch(es) ({}) for application {} ({}) "
+                    "must be applied".format(', '.join(missing_patches),
+                                             name, version)))

             LOG.info("The required patch(es) for application {} ({}) "
                      "have been applied.".format(name, version))
         else:
-            LOG.info("No patch required for application {} ({}).".format(name, version))
+            LOG.info("No patch required for application {} ({})."
+                     "".format(name, version))

         return name, version, patches
@@ -0,0 +1,131 @@
+#
+# Copyright (c) 2021 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import pecan
+from pecan import rest
+from wsme import types as wtypes
+import wsmeext.pecan as wsme_pecan
+from six.moves.urllib.parse import urlparse
+
+from sysinv.api.controllers.v1 import base
+from sysinv.api.controllers.v1 import collection
+from sysinv.common import constants
+from sysinv.common import kubernetes
+from sysinv.common import utils
+
+
+class KubeCluster(base.APIBase):
+    """API representation of a Kubernetes cluster."""
+
+    cluster_name = wtypes.text
+    "Cluster name"
+
+    cluster_version = wtypes.text
+    "Cluster active version"
+
+    cluster_api_endpoint = wtypes.text
+    "Cluster Public API Endpoint URL"
+
+    cluster_ca_cert = wtypes.text
+    "Cluster Root CA Certificate Data"
+
+    admin_client_cert = wtypes.text
+    "Administrative Client Certificate Data"
+
+    admin_client_key = wtypes.text
+    "Administrative Client Key Data"
+
+    admin_user = wtypes.text
+    "Administrative User Name"
+
+    admin_token = wtypes.text
+    "Administrative Service Account Token (base64 encoded)"
+
+    @classmethod
+    def convert(cls, kube_cluster_data):
+        return KubeCluster(**kube_cluster_data)
+
+
+class KubeClusterCollection(collection.Collection):
+    """API representation of a collection of Kubernetes clusters."""
+
+    kube_clusters = [KubeCluster]
+    "A list containing Kubernetes cluster objects"
+
+    def __init__(self, **kwargs):
+        self._type = 'kube_clusters'
+
+    @classmethod
+    def convert(cls, kube_cluster_list):
+        collection = KubeClusterCollection()
+        collection.kube_clusters = [KubeCluster.convert(d)
+                                    for d in kube_cluster_list]
+        return collection
+
+
+class KubeClusterController(rest.RestController):
+    """REST controller for Kubernetes clusters."""
+
+    def __init__(self, parent=None, **kwargs):
+        self._parent = parent
+        self._kube_operator = kubernetes.KubeOperator()
+
+    @wsme_pecan.wsexpose(KubeClusterCollection)
+    def get_all(self):
+        """Retrieve a list of Kubernetes clusters."""
+
+        # Currently only a single cluster is supported
+        kube_cluster = self._get_kube_cluster(kubernetes.KUBERNETES_CLUSTER_DEFAULT)
+        kube_clusters = [kube_cluster]
+
+        return KubeClusterCollection.convert(kube_clusters)
+
+    @wsme_pecan.wsexpose(KubeCluster, wtypes.text)
+    def get_one(self, name=kubernetes.KUBERNETES_CLUSTER_DEFAULT):
+        """Retrieve information about the given Kubernetes cluster."""
+
+        kube_cluster = self._get_kube_cluster(name)
+
+        return KubeCluster.convert(kube_cluster)
+
+    def _get_kube_cluster(self, cluster_name):
+        # Get the current version information
+        cluster_version = self._kube_operator.kube_get_kubernetes_version()
+
+        # Retrieve the default kubernetes cluster configuration
+        cluster_config = self._kube_operator.kube_get_kubernetes_config()
+        cluster_ca_cert = utils.get_file_content(cluster_config.ssl_ca_cert)
+        admin_client_cert = utils.get_file_content(cluster_config.cert_file)
+        admin_client_key = utils.get_file_content(cluster_config.key_file)
+
+        # Build public endpoint from private endpoint
+        endpoint_parsed = urlparse(cluster_config.host)
+        endpoint_host = utils.format_url_address(self._get_oam_address())
+        endpoint_netloc = "{}:{}".format(endpoint_host, endpoint_parsed.port)
+        cluster_api_endpoint = endpoint_parsed._replace(
+            netloc=endpoint_netloc).geturl()
+
+        # Retrieve the default cluster admin service account token
+        admin_user = kubernetes.KUBERNETES_ADMIN_USER
+        admin_token = self._kube_operator.kube_get_service_account_token(
+            admin_user, kubernetes.NAMESPACE_KUBE_SYSTEM)
+
+        return {
+            "cluster_name": cluster_name,
+            "cluster_version": cluster_version,
+            "cluster_api_endpoint": cluster_api_endpoint,
+            "cluster_ca_cert": cluster_ca_cert,
+            "admin_client_cert": admin_client_cert,
+            "admin_client_key": admin_client_key,
+            "admin_user": admin_user,
+            "admin_token": admin_token
+        }
+
+    def _get_oam_address(self):
+        address_name = utils.format_address_name(
+            constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_OAM)
+        address = pecan.request.dbapi.address_get_by_name(address_name)
+        return address.address
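The public endpoint derivation above keeps the scheme and port of the cluster-internal URL and swaps only the netloc for the OAM address. A minimal sketch with assumed addresses (the real host comes from the kubeconfig and the OAM DB entry):

```python
from urllib.parse import urlparse

private = urlparse('https://192.168.206.1:6443')   # cluster-internal endpoint
oam_address = '10.10.10.2'                          # would be '[fd00::2]' for IPv6

public = private._replace(
    netloc='{}:{}'.format(oam_address, private.port)).geturl()
print(public)   # https://10.10.10.2:6443
```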
@@ -21,6 +21,7 @@ from sysinv.api.controllers.v1 import link
 from sysinv.api.controllers.v1 import patch_api
 from sysinv.api.controllers.v1 import types
 from sysinv.common import constants
+from sysinv.common import dc_api
 from sysinv.common import exception
 from sysinv.common import kubernetes
 from sysinv.common import utils as cutils

@@ -187,6 +188,7 @@ class KubeUpgradeController(rest.RestController):
         """Create a new Kubernetes Upgrade and start upgrade."""

         force = body.get('force', False) is True
+        alarm_ignore_list = body.get('alarm_ignore_list')

         # There must not be a platform upgrade in progress
         try:

@@ -247,7 +249,10 @@ class KubeUpgradeController(rest.RestController):

         # The system must be healthy
         success, output = pecan.request.rpcapi.get_system_health(
-            pecan.request.context, force=force, kube_upgrade=True)
+            pecan.request.context,
+            force=force,
+            kube_upgrade=True,
+            alarm_ignore_list=alarm_ignore_list)
         if not success:
             LOG.info("Health query failure during kubernetes upgrade start: %s"
                      % output)

@@ -378,6 +383,12 @@ class KubeUpgradeController(rest.RestController):

             LOG.info("Completed kubernetes upgrade to version: %s" %
                      kube_upgrade_obj.to_version)

+            # If applicable, notify dcmanager upgrade is complete
+            system = pecan.request.dbapi.isystem_get_one()
+            role = system.get('distributed_cloud_role')
+            if role == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
+                dc_api.notify_dcmanager_kubernetes_upgrade_completed()
+
             return KubeUpgrade.convert_with_links(kube_upgrade_obj)

         else:
@@ -7,8 +7,8 @@ import json
 import time

 from oslo_log import log
-from sysinv.api.controllers.v1.rest_api import rest_api_request
 from sysinv.common import exception as si_exception
+from sysinv.common.rest_api import rest_api_request

 LOG = log.getLogger(__name__)
@@ -345,8 +345,26 @@ class NetworkController(rest.RestController):
             pecan.request.rpcapi.reconfigure_service_endpoints(
                 pecan.request.context, chosts[0])

+        # After the initial configuration has completed, we can still delete/add
+        # the system controller networks in a subcloud's controller to
+        # re-home a subcloud to a new central cloud. In this case, we want
+        # to update the related services configurations at runtime.
+        if cutils.is_initial_config_complete() and \
+                network['type'] in [constants.NETWORK_TYPE_SYSTEM_CONTROLLER,
+                                    constants.NETWORK_TYPE_SYSTEM_CONTROLLER_OAM]:
+            self._update_system_controller_network_config(network['type'])
         return Network.convert_with_links(result)

+    def _update_system_controller_network_config(self, type):
+        """Update related services configurations after updating system
+           controller networks"""
+        if type == constants.NETWORK_TYPE_SYSTEM_CONTROLLER:
+            pecan.request.rpcapi.update_ldap_client_config(
+                pecan.request.context)
+        elif type == constants.NETWORK_TYPE_SYSTEM_CONTROLLER_OAM:
+            pecan.request.rpcapi.update_dnsmasq_config(
+                pecan.request.context)
+
     @wsme_pecan.wsexpose(NetworkCollection,
                          types.uuid, int, wtypes.text, wtypes.text)
     def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
@@ -464,7 +464,13 @@ class OAMNetworkController(rest.RestController):
             if rpc_extoam[field] != extoam[field]:
                 rpc_extoam[field] = extoam[field]

-        rpc_extoam.save()
+        rpc_extoam.save()  # pylint: disable=no-value-for-parameter
+
+        # Update OAM networking configuration with the new unit IPs of each
+        # controller when transitioning to a duplex system
+        system = pecan.request.dbapi.isystem_get_one()
+        if system.capabilities.get('simplex_to_duplex_migration'):
+            rpc_extoam.migrate_to_duplex()

         pecan.request.rpcapi.update_oam_config(pecan.request.context)
@@ -4,8 +4,8 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 from oslo_log import log
-from sysinv.api.controllers.v1.rest_api import rest_api_request
-from sysinv.api.controllers.v1.rest_api import get_token
+from sysinv.common.rest_api import get_token
+from sysinv.common.rest_api import rest_api_request

 LOG = log.getLogger(__name__)
@@ -1,4 +1,4 @@
-# Copyright (c) 2015-2020 Wind River Systems, Inc.
+# Copyright (c) 2015-2021 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #

@@ -9,6 +9,7 @@ from pecan import rest
 import wsme
 from wsme import types as wtypes
 import wsmeext.pecan as wsme_pecan
+from ast import literal_eval

 from oslo_log import log
 from sysinv._i18n import _

@@ -327,6 +328,16 @@ class PCIDeviceController(rest.RestController):
                 rpc_device[field] = None
             else:
                 rpc_device[field] = getattr(device, field)
+                if field == 'sriov_numvfs':
+                    # Save desired number of VFs in extra_info since
+                    # sriov_numvfs may get overwritten by concurrent inventory report
+                    expected_numvfs = {'expected_numvfs': rpc_device[field]}
+                    if not rpc_device['extra_info']:
+                        rpc_device['extra_info'] = str(expected_numvfs)
+                    else:
+                        extra_info = literal_eval(rpc_device['extra_info'])
+                        extra_info.update(expected_numvfs)
+                        rpc_device['extra_info'] = str(extra_info)

         rpc_device.save()
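The `extra_info` column is stored as the `str()` of a dict and parsed back with `ast.literal_eval`. A minimal round-trip sketch, with illustrative VF counts:

```python
from ast import literal_eval

extra_info = "{'expected_numvfs': 8}"     # as persisted in the DB column

parsed = literal_eval(extra_info)          # {'expected_numvfs': 8}
parsed.update({'expected_numvfs': 16})     # user reconfigures the VF count
extra_info = str(parsed)                   # "{'expected_numvfs': 16}"

# literal_eval only evaluates Python literals, so a corrupted or malicious
# string raises ValueError/SyntaxError instead of executing code.
```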
@@ -348,29 +359,29 @@ def _check_host(host):


 def _check_field(field):
-    if field not in ["enabled", "name", "driver", "sriov_numvfs", "sriov_vf_driver"]:
+    if field not in ["enabled", "name", "driver", "sriov_numvfs", "sriov_vf_driver", "extra_info"]:
         raise wsme.exc.ClientSideError(_('Modifying %s attribute restricted') % field)


 def _check_device_sriov(device, host):
     sriov_update = False
-    if (device['pdevice_id'] == dconstants.PCI_DEVICE_ID_FPGA_INTEL_5GNR_FEC_PF and
+    if (device['pdevice_id'] in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
             host.invprovision != constants.PROVISIONED):
         raise wsme.exc.ClientSideError(_("Cannot configure device %s "
                                          "until host %s is unlocked for the first time." %
                                          (device['uuid'], host.hostname)))

-    if (device['pdevice_id'] not in dconstants.SRIOV_ENABLED_DEVICE_IDS and
+    if (device['pdevice_id'] not in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
             'sriov_numvfs' in device.keys() and device['sriov_numvfs']):
         raise wsme.exc.ClientSideError(_("The number of SR-IOV VFs is specified "
                                          "but the device is not supported for SR-IOV"))

-    if (device['pdevice_id'] not in dconstants.SRIOV_ENABLED_DEVICE_IDS and
+    if (device['pdevice_id'] not in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
             'sriov_vf_driver' in device.keys() and device['sriov_vf_driver']):
         raise wsme.exc.ClientSideError(_("The SR-IOV VF driver is specified "
                                          "but the device is not supported for SR-IOV"))

-    if device['pdevice_id'] not in dconstants.SRIOV_ENABLED_DEVICE_IDS:
+    if device['pdevice_id'] not in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS:
         return sriov_update

     if 'sriov_numvfs' not in device.keys():

@@ -400,15 +411,18 @@ def _check_device_sriov(device, host):
         raise wsme.exc.ClientSideError(_("The SR-IOV VF driver must be specified"))
     else:
         if (device['sriov_vf_driver'] is not None and
-                device['pdevice_id'] == dconstants.PCI_DEVICE_ID_FPGA_INTEL_5GNR_FEC_PF and
-                device['sriov_vf_driver'] not in dconstants.FPGA_INTEL_5GNR_FEC_VF_VALID_DRIVERS):
+                device['pdevice_id'] in
+                dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
+                device['sriov_vf_driver'] not in
+                dconstants.FPGA_INTEL_5GNR_FEC_VF_VALID_DRIVERS):
             msg = (_("Value for SR-IOV VF driver must be one of "
                      "{}").format(', '.join(dconstants.FPGA_INTEL_5GNR_FEC_VF_VALID_DRIVERS)))
             raise wsme.exc.ClientSideError(msg)

         if ('driver' in device.keys() and device['driver'] and
-                device['pdevice_id'] == dconstants.PCI_DEVICE_ID_FPGA_INTEL_5GNR_FEC_PF and
-                device['driver'] not in dconstants.FPGA_INTEL_5GNR_FEC_PF_VALID_DRIVERS):
+                device['pdevice_id'] in dconstants.SRIOV_ENABLED_FEC_DEVICE_IDS and
+                device['driver'] not in
+                dconstants.FPGA_INTEL_5GNR_FEC_PF_VALID_DRIVERS):
             msg = (_("Value for SR-IOV PF driver must be one of "
                      "{}").format(', '.join(dconstants.FPGA_INTEL_5GNR_FEC_PF_VALID_DRIVERS)))
             raise wsme.exc.ClientSideError(msg)
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2018 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
 #

 import jsonpatch

@@ -79,7 +79,8 @@ INTERFACE_PROFILE_FIELDS = ['ifname', 'iftype', 'imtu', 'networktype',
                             'txhashpolicy', 'forihostid', 'datanetworks',
                             'vlan_id', 'ipv4_mode', 'ipv6_mode',
                             'ipv4_pool', 'ipv6_pool',
-                            'sriov_numvfs', 'sriov_vf_driver']
+                            'sriov_numvfs', 'sriov_vf_driver', 'max_tx_rate',
+                            'primary_reselect']


 class Profile(base.APIBase):

@@ -1355,6 +1356,7 @@ def _create_if_profile(profile_name, profile_node):
                 'ipv6_pool': ipv6_mode['pool'],
                 'sriov_numvfs': ethIf.virtualFunctions,
                 'sriov_vf_driver': ethIf.virtualFunctionDriver,
+                'max_tx_rate': ethIf.maxTxRate,
                 'interface_profile': True
                 }
         newIf = interface_api._create(idict, from_profile=True)

@@ -1388,6 +1390,7 @@ def _create_if_profile(profile_name, profile_node):
                 'networktype': nt,
                 'aemode': aeIf.aeMode,
                 'txhashpolicy': aeIf.txPolicy,
+                'primary_reselect': aeIf.primary_reselect,
                 'forihostid': iprofile_id,
                 'datanetworks': providernets,
                 'ipv4_mode': ipv4_mode,

@@ -1397,6 +1400,7 @@ def _create_if_profile(profile_name, profile_node):
                 'imtu': aeIf.mtu,
                 'sriov_numvfs': ethIf.virtualFunctions,
                 'sriov_vf_driver': ethIf.virtualFunctionDriver,
+                'max_tx_rate': ethIf.maxTxRate,
                 'interface_profile': True
                 }

@@ -1425,6 +1429,7 @@ def _create_if_profile(profile_name, profile_node):
                 'imtu': vlanIf.mtu,
                 'sriov_numvfs': ethIf.virtualFunctions,
                 'sriov_vf_driver': ethIf.virtualFunctionDriver,
+                'max_tx_rate': ethIf.maxTxRate,
                 'interface_profile': True
                 }
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2015 Wind River Systems, Inc.
+# Copyright (c) 2013-2021 Wind River Systems, Inc.
 #

 import netaddr

@@ -136,6 +136,7 @@ class PciSriov(Network):
         super(PciSriov, self).__init__(node, constants.NETWORK_TYPE_PCI_SRIOV)
         self.virtualFunctions = int(node.get('virtualFunctions'))
         self.virtualFunctionDriver = node.get('virtualFunctionDriver')
+        self.maxTxRate = node.get('maxTxRate')


 class Interface(object):

@@ -150,6 +151,7 @@ class Interface(object):
         self.routes = []
         self.virtualFunctions = 0
         self.virtualFunctionDriver = None
+        self.maxTxRate = None
         networksNode = ifNode.find('networks')
         if networksNode is not None:
             for netNode in networksNode:

@@ -171,6 +173,7 @@ class Interface(object):
             elif network.networkType == constants.NETWORK_TYPE_PCI_SRIOV:
                 self.virtualFunctions = network.virtualFunctions
                 self.virtualFunctionDriver = network.virtualFunctionDriver
+                self.maxTxRate = network.maxTxRate

             if isinstance(network, Network):
                 self.providerNetworks = network.providerNetworks

@@ -304,12 +307,15 @@ class AeInterface(Interface):
             if node.tag == 'activeStandby':
                 self.aeMode = 'activeStandby'
                 self.txPolicy = None
+                self.primary_reselect = node.get('primary_reselect')
             elif node.tag == 'balanced':
                 self.aeMode = 'balanced'
                 self.txPolicy = node.get('txPolicy')
+                self.primary_reselect = None
             elif node.tag == 'ieee802.3ad':
                 self.aeMode = '802.3ad'
                 self.txPolicy = node.get('txPolicy')
+                self.primary_reselect = None

         node = ifNode.find('interfaces')
         if node:
@@ -45,7 +45,8 @@ SYSINV_ROUTE_MAX_PATHS = 4
 ALLOWED_NETWORK_TYPES = [constants.NETWORK_TYPE_DATA,
                          constants.NETWORK_TYPE_MGMT,
                          constants.NETWORK_TYPE_CLUSTER_HOST,
-                         constants.NETWORK_TYPE_STORAGE]
+                         constants.NETWORK_TYPE_STORAGE,
+                         constants.NETWORK_TYPE_OAM]


 class Route(base.APIBase):