commit cb4cf4299c
Merge remote-tracking branch 'gerrit/master' into f/centos8

Change-Id: Ie48d34064ad1c150692f6b0942723390374f1d34

 .zuul.yaml | 95
@@ -32,6 +32,9 @@
         - cgtsclient-tox-py36
         - cgtsclient-tox-pep8
         - cgtsclient-tox-pylint
+    post:
+      jobs:
+        - stx-config-upload-git-mirror

 - job:
     name: sysinv-tox-py27
@@ -193,3 +196,95 @@
    vars:
      tox_envlist: pylint
      tox_extra_args: -c sysinv/cgts-client/cgts-client/tox.ini

- job:
    name: stx-config-upload-git-mirror
    parent: upload-git-mirror
    description: >
      Mirrors opendev.org/starlingx/config to
      github.com/starlingx/config
    vars:
      git_mirror_repository: starlingx/config
    secrets:
      - name: git_mirror_credentials
        secret: stx-config-github-secret
        pass-to-parent: true

- secret:
    name: stx-config-github-secret
    data:
      user: git
      host: github.com
      # yamllint disable-line rule:line-length
host_key: github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
ssh_key: !encrypted/pkcs1-oaep
- ezp3pjapGFU4p0lqwUsI9o6qvWaoTahWJ/j7i27D8wJ6gNmLUuiWTQlfJHoxGyp+EOWpF
WgmBytwN9yAMf7kfiTHLWIaKIlw7ruErsi0pWkI5h5hWqxQCea+cQywLc5xby53NWc+Y/
c/N/sNYh/+jeH2d1Pn4MDKEaeGKkjHyHd3ZDyLaH0qUSrtTQt5V4TJe8h5L8Vr+jIs/wr
I6JFbw+wMDLeTjnJGPz3HZpvjAAbdKdtLmi30egH4WV1nmv1eEFV4vXaoclhCbJdcu2vK
b4nRR8nEXGqcsC88en2pGEf2xma8pIlmGbcuTz7Zn1J9Mez4wPUjTjVKu/DRh8Zm1/lgg
ZpDYkxmD+MSUIOr05/MMs5Czl7ZSEU6mQ7PYy92MYJn2H3xbIC1lAbhO7BQRxTKBUWThL
KWz0qPXssAvHPaQCBnBYzGou97KLW8umGRiYywhobK+NQEWerMp9sj2/7ZgPQExQ7dzVI
eWHqaMlIkTUM+ZCE9MGHnVmHrAPYcF3m1eTfZFoC8JnJ5QODwvp+92oEIYuMeNslqOat2
Lu/gRqvO434ULBmglLesn/XPH9Rsvrxi+FZOT4MwvQqu9puXExP4mLMTlUuHD+X6pqW7x
IUOuUyPZM8OsVELYN755DamIswOCTuLuTOYMBUYXcoOMTeOHd2ynuF5ebAhwRE=
- W8Xtd1UWccHXJBf16iZ7O/QpgOHrRLw/o45TZ9hwI877QLnkCKkaa8Qudqixeh1SXghg7
Y/r1CRlir7DSN3JWv98T8MWttrBIJ0IqtqGKycW6N5PrYhwXd8xcgLHMZBK8H44Lim1GW
WsHAQIagA9+86NtQBlmDpiVchhj5JZRfSOcU/ahQPaVGXyZPRTZu9iymPCCTBdU57Jcow
6h3+JVt55/Bvf/i0ZtbZUH2rt7L65GMaYJaifzldLZ73kytbjFJhRqCKlQHdEdYw9Wz9C
zC71h5YFuqcv9uGKcVarEI1XjGyoMAK16Yg/mP1i27Cztu0WkgZOVC/mRrpT7Q1hh9+5l
A+eMJjccgAVI0eE7P+m3WzHaZ0eWqDI03EDZWlQWM76YN9oQDAdmDyaSrGlF/zBC8p+qX
wA0Cy+hbnhLi/Hhrz8qh+/6DHzPR85rnfAd7TmbYuRc6wyfVM+NPvVt5t4oei/9Dcc07U
9SsLPX+B6AuXyZm0Ux3kNLocoh3XoVUtT88n44nPqgUe3/50ROwfGm3HD9s5MMBhXYm+e
y/aMRbhOgzUw8NG4OUT65tnSQOtxhSt95axk5R6qaE9Kx0DXJduyuM8vAxKHQR0zH5j1W
v9BUbFeKasADUav+nICNuS4EemjiYlTG9MrLJqPHUuKYwc1JZF60Pw8G3kMSBM=
- q5m73hBJyAm6lkhpvgvbdRur2WO4de9i/6Dhr28B3FJr1IGqIG1KNZspeJLAJVBauBsIm
qUw3WYjy0s/n88/6bV38YxwTZ/oslXK+vMNmTGabbUJqwd5+pcvWVi77ubgOwT3U48/e4
xfbtG7gD+ch/rotu9H+c/gUTvzdJ6hLg8QP0ylOWAp5JB1epAaGw4mY3xKej8tyoVGNQX
B3/Zo9ueS3AvdIJKwU7SmSSs0Cr7r1eFafN9ySOWV/3TZZLWnk9iW6dm1XE5GN89bt8Jd
PYXH1KYTxdPrPtmOuq1sukjjWTUEdcY+ei6sb4wVrSxqp4w55AMZ/tM9aFZad5NWAPwXy
kGsPPOYZiBix/4t2O/FZZ+dfKkm1Sa6WgKAyQoQZ6wcHvnfkhtmHPu3hywmEnT/jVYLk9
bM9hZrhwPGhWDKRuNbb4nLLnRoYcLBJcwR775ZW3E9tauJzfF7xIC6DbhQKYEz8yi+SR8
naciC6+ZM0VuaeNGU+X8cVJbenLoge4+RTBI0NtY4d3SP0fYIVkJ0HQDfK74suALSQkC/
TFshuilrSlvFC/QHL/PrLbGB7dQciF/9Hps6N0OCUT6iaq79tKEVQ4CiV/skEVxIpRxTJ
QizWcFeLj8jp5C+rFRppOSbGaLcyoK1zvpgYcgDrmwCRrgg0Ek4jMrLC/X3ZVc=
- bBua7K+SwYJmj9Z5JFxjivnSLSH6hRCqswhmi4AStrUnRFtH1UOPI/ca1gi6IWpCrBjkX
BxOZSRtDfC1Nd7fgbSKGpEMoRpjuAWfz6ZZA91izTxsS7mYkUwtyvaWVrLOvPeJp0zsxO
LZTVQ3zAmJRe9vaHcqRwCq1jedwsbovjFWPBMXBZ69Olk50WjfJ57BC0y4ih70xADP6l6
aSqS6813nSD1L8J1bA9TsYFLnoWfwb9SnWGXXQ3A8fHW1edfybEglr7TVfQF0LOB3cUOh
E+AEhciHvRglLTkVZDXs2r+suhIqN8YAgretp38KEDeItw3tE/qG3ChwJrIGMSYa3qVty
zANgyAfaZy+NwtbFk/NXRmxKLpGOIMWl2rGQyfjvcpFp081uHhTZEQADlDd5ptm52uT9+
BsHpfjUz/5DnPk0Q0tUmT7EYSUKvGXd6+j4PjOgRr2F7gFW1jFBWeaPlx3gHkn4RZWoGN
JAS+fuLeeOBEXpIAeRyE3++y5Vn04sGoauGiyVnbn4Im3DmsxpV7K4SJRt1OxpIIWvoYr
g4uVl1pOfOge74vO0rErH0ybGmv6uBXffuiTMZ2sCQUgwaeqYzv8wMQXdCZJkmZSoeMpb
FyF38GUan3YaFDQUvqJTW6jQmjZDdw8yJeVCsmtjKz5v/JZZPgLNMKqjwSLgpI=
- zxs9GmmDk3wwlELpWKHmMzXXzeg2POow5wESMRQp/xWtqIDEKRzEI2IlCUWecw4LRi2NX
J9hmxOVSHjbw7t1RPNPTWTDlkQjIqw0/JGQkMcG45jR7R1NYHPhz0ZWvVwOOzZEJ0zTrv
ByltmwfFZctm4BmcBD+2b/Dc4SmxZNarWUnsY66prjzRAPdytcPi4L+Ipy0fmguZNp0zP
BzINlqfPp/BDWhHaG+Xb+mtgT6j0RZJCFTCybtbyy+XTvMYlGvaegmM7uFqUXE5Wc5i/4
v3ezrNUVFgNfAMBxzS/xuezwuYj3FcjifW3LIiai7uKC2MYfq9CUMBcmMnSAef0OIf6fn
valXLG3Q11Al8w/40gTTDD2wE/2svauVtnDVZiiKawcz29zdrRSrAqUX2dYD5bp+t/6FX
GNVGEmFpodBoLWkYuBR9RQ4q3VYfYv3KG9lppkrOzBYjhsniyTKLewRsSYaVcmXxfwVBy
pPC25HG26kf04bsK/zmLaRUjWJgsc+y/saOhxawxVm/tZnFCfjlfxTRvnrpgsEYwJEMFK
QTGBU5nwEPEwbiwLW05dx5zvIA2cK2QO47X4cNFZX6wVjKAQPfxJkjRH/zA5r5qmBrnuh
IJL0R1iRt9NwZlxxE/ShMkeMoJZ8IcYASOyaKip116874rcaKQAPjXLDcqqxIc=
- ky0Mg/8OpN2Uz9XFtIa1r3jzQ9/jrVD4BZjrU0mH9VS5f0UMmXT9hfnMUmRNVPQ2aGAcY
b7JTpTjKpHUpZ3zfJqfvU9jgREAMeTQRyNXoWPBUgdG1h0tCY0dp81noIqyybDQOZzk+X
AP6OC83Or55q4jx8424WvqIiBKVZyLAGZrl0FuAJVX9g/dvQmvvuXUR2Sq49c+Wvji5m/
en6bhs8ONZsHJTZz/C0DFNeSa4Bt7yhGS5lB5tQ95dBamA50nTN2Sz1mSp/X4vdzBVmTA
SkNcJvHrQY+6tKLFIC9w606HqTJNnVaNWoFiRgDMJmZ7WHGpBfYhXqo9XD2nBrbgMPkoZ
R70JZJXXTilrZ6Ja9LA4EgOzHMb1G4lKTgs+L2tJgWAt+j+nG4K2rFd52OdgoI28eDa2f
npNLQiGP3MztESao2DPWz1Zt5dBV7om5BY0So6aAfcDdpsHio9JGoh0OAcky9dhYRD2bo
/BpYweIWb3CGIpUcPd7WfS8tKs5tKtxpPcNIoVwbAjHsXMgJB7liqLKZlat7gg0KwTPNT
gGwXRp3YcBRUJ+Br1oH16v8Jl+WnlnVgoNAza53ifWIBbleF5eho1G2bBosNlUvnDStx/
urJyz34jHhaPdjhGB33/M8hDiOGv09RsrzfYPFdCMgtRhZMnuN0PE88HvXSBPs=
- y/EGjzkaBYJoPHHESLOk5auWYHvRfh6BI5KtzziEs5vlHa2bMe5L2FhIxqkeXK48E7PuV
j09FTSfKXIj+4mHRMEeIZBbtACBCO9kj1FBBcoOp3aSd4Gj/3so0aFHzvqdqhlrt2LSRa
kDs20HmfvyE60KWZmBrGD1swMREzh/XCEvg2hqgXnv0gmwXI21lsTpwUVO1MwnIHySpox
+7YG79ihtqdke0D4WRrM08TSKdEsl11X1O6mOgvrNKPOlJiD7RCju1S1Zr4UXuzolp5GO
caCvAQbCjiP3FnrsuK4GwqvKQaXcG7tFlgHovZrNTNgQdATVh9D09ge0uIjn+c6gUH1jg
o/HDLh+Lho0exWq1VUVTTVHXjznLYwJwEcRpU7ZCHC7sTE0p6dNd7yGarSa8lKbtTmFUF
ZMfPNJ8fV4J2NXEE7K5JYXy4IckqHbFkL71PcuKLQNF0clV6QGajuxTSch0i/UqEfH5cI
dHPr4PoVhL3rY2+RBjFprDr2TUfFUcB7arnlUAx/+K1BOhwJ2xf+MS2Vg2uhpXO46QtnY
UHIoz1pMMTVsrh7Fgg2+A6aX5s8HN+IMKx/Uc96D2eEQAZYMshDh0qf3p/pmu9ul54/T5
0gBR90klppFubnh3yiCO2O1brec3uACWMjKMPAUAMszGljPQ4C8wDGvCGWz1YE=

@@ -4467,6 +4467,17 @@ badMediaType (415)
         "uuid":"70649b44-b462-445a-9fa5-9233a1b5842d"
     }

+*******************************
+Applies the PTP configuration
+*******************************
+
+.. rest_method:: POST /v1/ptp/apply
+
+**Normal response codes**
+
+204
+
+
 -------------
 External OAM
 -------------

@@ -10289,7 +10300,7 @@ Show detailed information about a host filesystem
 ***************************************************


-.. rest_method:: GET /v1/ihosts/{host_id}/host_fs/{host_fs_id}
+.. rest_method:: GET /v1/host_fs/{host_fs_id}

 **Normal response codes**


@@ -10355,7 +10366,7 @@ This operation does not accept a request body.
 Modifies specific host filesystem(s)
 *************************************

-.. rest_method:: PATCH /v1/ihosts/{host_id}/host_fs/update_many
+.. rest_method:: PUT /v1/ihosts/{host_id}/host_fs/update_many

 **Normal response codes**


@@ -10906,7 +10917,7 @@ Install System Certificate

 .. rest_method:: POST /v1/certificate/certificate_install

-Accepts a PEM file containing the X509 certificate.
+Accepts a PEM file containing the X509 certificates.

 For security reasons, the original certificate, containing the private
 key, will be removed, once the private key is processed.

@@ -11014,6 +11025,59 @@ itemNotFound (404)

 This operation does not accept a request body.

+**************************
+Deletes a CA certificate
+**************************
+
+.. rest_method:: DELETE /v1/certificate/{uuid}
+
+**Normal response codes**
+
+200
+
+**Error response codes**
+
+serviceUnavailable (503), badRequest (400), unauthorized (401),
+forbidden (403), badMethod (405), overLimit (413), itemNotFound (404)
+
+**Request parameters**
+
+.. csv-table::
+   :header: "Parameter", "Style", "Type", "Description"
+   :widths: 20, 20, 20, 60
+
+   "uuid", "URI", "csapi:UUID", "The unique identifier of the CA Certificate."
+
+**Response parameters**
+
+.. csv-table::
+   :header: "Parameter", "Style", "Type", "Description"
+   :widths: 20, 20, 20, 60
+
+   "uuid (Optional)", "plain", "csapi:UUID", "The universally unique identifier for this object."
+   "certtype (Optional)", "plain", "xsd:string", "The type of the certificate."
+   "signature (Optional)", "plain", "xsd:string", "The signature of the certificate."
+   "details (Optional)", "plain", "xsd:string", "A dictionary of the certificate details."
+   "links (Optional)", "plain", "xsd:list", "For convenience, resources contain links to themselves. This allows a client to easily obtain rather than construct resource URIs. The following types of link relations are associated with resources: a self link containing a versioned link to the resource, and a bookmark link containing a permanent link to a resource that is appropriate for long term storage."
+   "created_at (Optional)", "plain", "xsd:dateTime", "The time when the object was created."
+   "updated_at (Optional)", "plain", "xsd:dateTime", "The time when the object was last updated."
+   "start_date (Optional)", "plain", "xsd:dateTime", "The time when the certificate becomes valid."
+   "expiry_date (Optional)", "plain", "xsd:dateTime", "The time when the certificate expires."
+
+::
+
+   {
+       "uuid": "32e8053a-04de-468c-a3c3-6bf55be4d0e6",
+       "certtype": "ssl_ca",
+       "expiry_date": "2022-12-14T15:08:25+00:00",
+       "details": null,
+       "signature": "ssl_ca_9552807080826043442",
+       "start_date": "2020-02-24T15:08:25+00:00",
+       "issuer": null
+   }
+
+This operation does not accept a request body.
+
 ---------------
 Docker Registry
 ---------------
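The two additions above (POST /v1/ptp/apply and DELETE /v1/certificate/{uuid}) can be exercised directly against the sysinv API. A minimal sketch, assuming a valid Keystone token and the default sysinv endpoint (both assumptions; only the paths, methods and response codes come from the documentation above):

    import requests

    SYSINV_URL = "http://controller:6385"        # assumed endpoint
    HEADERS = {"X-Auth-Token": "<token>"}        # assumed auth header

    # Apply the PTP configuration (204 on success, per the doc above)
    r = requests.post(SYSINV_URL + "/v1/ptp/apply", headers=HEADERS)
    assert r.status_code == 204

    # Delete a CA certificate by UUID (200 on success, per the doc above)
    uuid = "32e8053a-04de-468c-a3c3-6bf55be4d0e6"
    r = requests.delete(SYSINV_URL + "/v1/certificate/%s" % uuid,
                        headers=HEADERS)
    print(r.json().get("certtype"))              # e.g. "ssl_ca"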
@@ -344,6 +344,18 @@ start()
         fi
     fi

+    # Copy over kube api server encryption provider config
+    if [ -e $CONFIG_DIR/kubernetes/encryption-provider.yaml ]
+    then
+        cp $CONFIG_DIR/kubernetes/encryption-provider.yaml /etc/kubernetes/
+        if [ $? -ne 0 ]
+        then
+            fatal_error "Unable to copy kube api server encryption provider config file"
+        else
+            chmod 600 /etc/kubernetes/encryption-provider.yaml
+        fi
+    fi
+
     # Keep the /opt/branding directory to preserve any new files
     rm -rf /opt/branding/*.tgz
     cp $CONFIG_DIR/branding/*.tgz /opt/branding 2>/dev/null
@@ -1,2 +1,2 @@
 SRC_DIR="cgts-client"
-TIS_PATCH_VER=74
+TIS_PATCH_VER=75
@@ -36,3 +36,8 @@ class CertificateManager(base.Manager):
     def certificate_install(self, certificate_file, data=None):
         path = self._path("certificate_install")
         return self._upload(path, certificate_file, data=data)
+
+    def certificate_uninstall(self, uuid):
+        path = self._path(uuid)
+        _, body = self.api.json_request('DELETE', path)
+        return body
@@ -100,9 +100,26 @@ def do_certificate_install(cc, args):
         raise exc.CommandError('Certificate %s not installed: %s' %
                                (certificate_file, e))
     else:
-        _print_certificate_show(response.get('certificates'))
+        certificates = response.get('certificates')
+        for certificate in certificates:
+            _print_certificate_show(certificate)
         try:
             os.remove(certificate_file)
         except OSError:
             raise exc.CommandError('Error: Could not remove the '
                                    'certificate %s' % certificate_file)
+
+
+@utils.arg('certificate_uuid', metavar='<certificate_uuid>',
+           help="UUID of certificate to uninstall")
+@utils.arg('-m', '--mode',
+           metavar='<mode>',
+           help="Supported mode: 'ssl_ca'.")
+def do_certificate_uninstall(cc, args):
+    """Uninstall certificate."""
+
+    supported_modes = ['ssl_ca']
+    if args.mode not in supported_modes:
+        raise exc.CommandError('Unsupported mode: %s' % args.mode)
+
+    cc.certificate.certificate_uninstall(args.certificate_uuid)
+    print('Uninstalled certificate: %s' % (args.certificate_uuid))
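The new CLI command maps onto the client method added two hunks above. A usage sketch, assuming the usual cgtsclient entry point (the get_client() arguments here are illustrative, not taken from this commit):

    from cgtsclient import client

    # Illustrative construction; real deployments pass full Keystone credentials.
    cc = client.get_client('1', os_auth_url='http://controller:5000/v3')

    # Issues DELETE /v1/certificate/{uuid} via certificate_uninstall() above.
    cc.certificate.certificate_uninstall('32e8053a-04de-468c-a3c3-6bf55be4d0e6')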
@@ -70,7 +70,7 @@ def _get_ports(cc, ihost, interface):

     if interface.iftype == 'ethernet':
         interface.dpdksupport = [p.dpdksupport for p in ports]
-    if interface.iftype == 'vlan':
+    elif interface.iftype == 'vlan':
         interfaces = cc.iinterface.list(ihost.uuid)
         for u in interface.uses:
             for j in interfaces:

@@ -91,6 +91,13 @@ def _get_ports(cc, ihost, interface):
                 if j.ifname == str(u):
                     uses_ports = cc.iinterface.list_ports(j.uuid)
                     interface.dpdksupport = [p.dpdksupport for p in uses_ports]
+    elif interface.iftype == 'vf':
+        interfaces = cc.iinterface.list(ihost.uuid)
+        for u in interface.uses:
+            u = next(j for j in interfaces if j.ifname == str(u))
+            _get_ports(cc, ihost, u)
+            if u.dpdksupport:
+                interface.dpdksupport = u.dpdksupport


 def _find_interface(cc, ihost, ifnameoruuid):
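The new 'vf' branch above resolves each name in interface.uses with next() and recurses. A self-contained illustration of that lookup pattern (the data here is hypothetical); note that next() raises StopIteration when no interface matches:

    class Iface(object):
        def __init__(self, ifname):
            self.ifname = ifname

    interfaces = [Iface('sriov0'), Iface('data0')]

    # First interface whose name matches, as in the diff above.
    u = next(j for j in interfaces if j.ifname == 'sriov0')
    print(u.ifname)                      # sriov0

    try:
        next(j for j in interfaces if j.ifname == 'missing')
    except StopIteration:
        print('no matching interface')   # unmatched names would raise here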
@@ -48,3 +48,6 @@ class ptpManager(base.Manager):

     def update(self, ptp_id, patch):
         return self._update(self._path(ptp_id), patch)
+
+    def apply(self):
+        return self.api.json_request('POST', self._path() + "/apply")
@@ -73,3 +73,11 @@ def do_ptp_modify(cc, args):
         raise exc.CommandError('PTP not found: %s' % ptp.uuid)

     _print_ptp_show(ptp)
+
+
+def do_ptp_apply(cc, args):
+    """Apply the PTP config."""
+
+    cc.ptp.apply()
+
+    print('Applying PTP configuration')
@@ -1,2 +1,2 @@
 SRC_DIR="sysinv"
-TIS_PATCH_VER=344
+TIS_PATCH_VER=345
@@ -22,5 +22,4 @@ graft etc
 include sysinv/db/sqlalchemy/migrate_repo/migrate.cfg
-include sysinv/openstack/common/config/generator.py
 include sysinv/tests/policy.json
 include sysinv/tests/db/sqlalchemy/test_migrations.conf
 graft tools
@@ -97,7 +97,7 @@


 #
-# Options defined in sysinv.openstack.common.db.sqlalchemy.session
+# Options defined in oslo_db
 #

 # the filename to use with sqlite (string value)

@@ -467,7 +467,7 @@


 #
-# Options defined in sysinv.openstack.common.db.api
+# Options defined in oslo_db
 #

 # The backend to use for db (string value)

@@ -479,12 +479,12 @@


 #
-# Options defined in sysinv.openstack.common.db.sqlalchemy.session
+# Options defined in oslo_db
 #

 # The SQLAlchemy connection string used to connect to the
 # database (string value)
-#connection=sqlite:////sysinv.openstack.common/db/$sqlite_db
+#connection=sqlite:////oslo_db/$sqlite_db

 # timeout before idle sql connections are reaped (integer
 # value)
@@ -1,8 +1,6 @@
 [DEFAULT]
 module=config.generator
 module=context
-module=db
-module=db.sqlalchemy
 module=flakes
 module=install_venv_common
 module=local
@@ -20,6 +20,7 @@ oslo.i18n # Apache-2.0
 oslo.config>=3.7.0 # Apache-2.0
 oslo.concurrency>=3.7.1 # Apache-2.0
+oslo.db>=4.1.0 # Apache-2.0
 oslo.messaging!=9.0.0 # Apache-2.0
 oslo.service>=1.10.0 # Apache-2.0
 oslo.utils>=3.5.0 # Apache-2.0
 oslo.serialization>=1.10.0,!=2.19.1 # Apache-2.0
@@ -109,23 +109,23 @@ if [ ${DAY_LEFT_S} -lt ${NINETY_DAYS_S} ]; then
     ERR=1
 fi

-# Restart docker container of k8s components to refresh the configurations within container
+# Restart the containers of k8s components to refresh the configurations within container
 if [ ${ERR} -eq 0 ]; then
-    docker ps | awk '/k8s_kube-apiserver/{print$1}' | xargs docker restart > /dev/null
+    crictl ps | awk '/kube-apiserver/{print$1}' | xargs crictl stop > /dev/null
     if [ $? -ne 0 ]; then
         ERR=2
     fi
 fi

 if [ ${ERR} -eq 0 ]; then
-    docker ps | awk '/k8s_kube-controller-manager/{print$1}' | xargs docker restart > /dev/null
+    crictl ps | awk '/kube-controller-manager/{print$1}' | xargs crictl stop > /dev/null
     if [ $? -ne 0 ]; then
         ERR=2
     fi
 fi

 if [ ${ERR} -eq 0 ]; then
-    docker ps | awk '/k8s_kube-scheduler/{print$1}' | xargs docker restart > /dev/null
+    crictl ps | awk '/kube-scheduler/{print$1}' | xargs crictl stop > /dev/null
     if [ $? -ne 0 ]; then
         ERR=2
     fi
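The script now stops the kube containers with crictl (the kubelet recreates them, picking up the rotated certificates) instead of docker restart. A rough Python equivalent of one pipeline, assuming crictl prints one container per line with the ID in the first column:

    import subprocess

    out = subprocess.check_output(['crictl', 'ps']).decode()
    ids = [line.split()[0]
           for line in out.splitlines() if 'kube-apiserver' in line]
    for cid in ids:
        # Stopping the container lets the kubelet recreate it with the
        # refreshed configuration, mirroring `xargs crictl stop` above.
        subprocess.check_call(['crictl', 'stop', cid])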
@@ -45,7 +45,8 @@ ALLOWED_NETWORK_TYPES = [constants.NETWORK_TYPE_MGMT,
                          constants.NETWORK_TYPE_OAM,
                          constants.NETWORK_TYPE_CLUSTER_HOST,
                          constants.NETWORK_TYPE_DATA,
-                         constants.NETWORK_TYPE_IRONIC]
+                         constants.NETWORK_TYPE_IRONIC,
+                         constants.NETWORK_TYPE_STORAGE]


 class Address(base.APIBase):

@@ -87,7 +88,13 @@ class Address(base.APIBase):
     "The UUID of the address pool from which this address was allocated"

     def __init__(self, **kwargs):
-        self.fields = objects.address.fields.keys()
+        # The interface_uuid in this `Address` type is kept to avoid changes to
+        # the API/CLI. However, `self.fields` refers to `objects.address.fields`,
+        # which doesn't include 'interface_uuid', so it is added manually.
+        # Otherwise, the controller's `Address.as_dict()` would not include
+        # `interface_uuid` despite the field being present.
+        self.fields = list(objects.address.fields.keys())
+        self.fields.append('interface_uuid')
         for k in self.fields:
             if not hasattr(self, k):
                 # Skip fields that we choose to hide

@@ -110,6 +117,9 @@ class Address(base.APIBase):
     @classmethod
     def convert_with_links(cls, rpc_address, expand=True):
         address = Address(**rpc_address.as_dict())
+        if rpc_address.interface_id:
+            address.interface_uuid = pecan.request.dbapi.iinterface_get(
+                rpc_address.interface_id).uuid
         if not expand:
             address.unset_fields_except(['uuid', 'address',
                                          'prefix', 'interface_uuid', 'ifname',

@@ -244,9 +254,6 @@ class AddressController(rest.RestController):

     def _check_interface_type(self, interface_id):
         interface = pecan.request.dbapi.iinterface_get(interface_id)
-        if (interface['ifclass'] == constants.INTERFACE_CLASS_PLATFORM and
-                interface['networktypelist'] is None):
-            raise exception.InterfaceNetworkNotSet()
         for nt in interface['networktypelist']:
             if nt not in ALLOWED_NETWORK_TYPES:
                 raise exception.UnsupportedInterfaceNetworkType(

@@ -288,7 +295,7 @@ class AddressController(rest.RestController):
             raise exception.AddressInSameSubnetExists(
                 **{'address': entry['address'],
                    'prefix': entry['prefix'],
-                   'interface': entry['interface_uuid']})
+                   'interface': entry['interface_id']})

     def _check_address_count(self, interface_id, host_id):
         interface = pecan.request.dbapi.iinterface_get(interface_id)
@@ -445,13 +445,6 @@ def _create(ceph_mon):
                 "replication is set to: %s'. Please update replication "
                 "before configuring a monitor on a worker node." % supported_replication))

-    # host must be locked and online unless this is controller-0
-    if (chost['hostname'] != constants.CONTROLLER_0_HOSTNAME and
-            (chost['availability'] != constants.AVAILABILITY_ONLINE or
-             chost['administrative'] != constants.ADMIN_LOCKED)):
-        raise wsme.exc.ClientSideError(
-            _("Host %s must be locked and online." % chost['hostname']))
-
     ceph_mon = _set_defaults(ceph_mon)

     # Size of ceph-mon logical volume must be the same for all
@@ -27,7 +27,6 @@ import wsme
 import wsmeext.pecan as wsme_pecan

 from cryptography import x509
 from cryptography.hazmat.backends import default_backend
 from pecan import expose
 from pecan import rest

@@ -42,6 +41,7 @@ from sysinv.api.controllers.v1 import utils
 from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common import utils as cutils
+from sysinv.openstack.common.rpc.common import RemoteError
 from wsme import types as wtypes

 LOG = log.getLogger(__name__)

@@ -322,23 +322,33 @@ class CertificateController(rest.RestController):
                 error=("No certificates have been added, "
                        "invalid PEM document: %s" % e))

-        # Extract the certificate from the pem file
-        cert = x509.load_pem_x509_certificate(pem_contents,
-                                              default_backend())
-
-        msg = self._check_cert_validity(cert)
-        if msg is not True:
+        # Extract the certificates from the pem file
+        try:
+            certs = cutils.extract_certs_from_pem(pem_contents)
+        except Exception as e:
+            msg = "No certificates have been added, %s" % e
             return dict(success="", error=msg)

-        if mode == constants.CERT_MODE_OPENSTACK:
-            domain, msg = _check_endpoint_domain_exists()
-            if domain:
-                msg = _check_cert_dns_name(cert, domain)
-                if msg is not True:
-                    return dict(success="", error=msg.message)
-            elif msg:
+        if not certs:
+            msg = "No certificates have been added, " \
+                  "no valid certificates found in file."
+            LOG.info(msg)
             return dict(success="", error=msg)

+        for cert in certs:
+            msg = self._check_cert_validity(cert)
+            if msg is not True:
+                return dict(success="", error=msg)
+
+            if mode == constants.CERT_MODE_OPENSTACK:
+                domain, msg = _check_endpoint_domain_exists()
+                if domain:
+                    msg = _check_cert_dns_name(cert, domain)
+                    if msg is not True:
+                        return dict(success="", error=msg.message)
+                elif msg:
+                    return dict(success="", error=msg)
+
         if mode == constants.CERT_MODE_TPM:
             try:
                 tpm = pecan.request.dbapi.tpmconfig_get_one()

@@ -364,63 +374,105 @@ class CertificateController(rest.RestController):
             config_dict = {'passphrase': passphrase,
                            'mode': mode,
                            }
-            signature = pecan.request.rpcapi.config_certificate(
+            inv_certs = pecan.request.rpcapi.config_certificate(
                 pecan.request.context,
                 pem_contents,
                 config_dict)

-        except Exception as e:
+        except RemoteError as e:
             msg = "Exception occurred e={}".format(e)
-            LOG.info(msg)
-            return dict(success="", error=str(e), body="", certificates={})
+            LOG.warn(msg)
+            return dict(success="", error=str(e.value), body="", certificates={})

-        # Update with installed certificate information
-        values = {
-            'certtype': mode,
-            # TODO(jkung) 'issuer': cert.issuer,
-            'signature': signature,
-            'start_date': cert.not_valid_before,
-            'expiry_date': cert.not_valid_after,
-        }
-        LOG.info("config_certificate values=%s" % values)
+        certificates = pecan.request.dbapi.certificate_get_list()

         # ssl and ssl_tpm certs are mutual exclusive, so
         # if the new cert is a SSL cert, delete the existing TPM cert as well
         # if the new cert is a TPM cert, delete the existing SSL cert as well
+        for certificate in certificates:
+            if (mode == constants.CERT_MODE_SSL
+                    and certificate.certtype == constants.CERT_MODE_TPM) or \
+               (mode == constants.CERT_MODE_TPM
+                    and certificate.certtype == constants.CERT_MODE_SSL):
+                pecan.request.dbapi.certificate_destroy(certificate.uuid)

-        if mode in [constants.CERT_MODE_SSL, constants.CERT_MODE_TPM]:
-            if mode == constants.CERT_MODE_SSL:
-                remove_certtype = constants.CERT_MODE_TPM
+        # Create new or update existing certificates in sysinv with the
+        # information returned from conductor manager.
+        certificate_dicts = []
+        for inv_cert in inv_certs:
+            values = {
+                'certtype': mode,
+                'signature': inv_cert.get('signature'),
+                'start_date': inv_cert.get('not_valid_before'),
+                'expiry_date': inv_cert.get('not_valid_after'),
+            }
+            LOG.info("config_certificate values=%s" % values)
+
+            # check to see if the installed cert exist in sysinv
+            uuid = None
+            for certificate in certificates:
+                if mode == constants.CERT_MODE_SSL_CA:
+                    if inv_cert.get('signature') == certificate.signature:
+                        uuid = certificate.uuid
+                        break
+                else:
+                    if mode == certificate.certtype:
+                        uuid = certificate.uuid
+                        break
+            if uuid:
+                certificate = pecan.request.dbapi.certificate_update(uuid,
+                                                                     values)
             else:
-                remove_certtype = constants.CERT_MODE_SSL
-            try:
-                remove_certificate = \
-                    pecan.request.dbapi.certificate_get_by_certtype(
-                        remove_certtype)
-                LOG.info("remove certificate certtype=%s uuid=%s" %
-                         (remove_certtype, remove_certificate.uuid))
-                pecan.request.dbapi.certificate_destroy(
-                    remove_certificate.uuid)
-            except exception.CertificateTypeNotFound:
-                pass
-
-        try:
-            certificate = \
-                pecan.request.dbapi.certificate_get_by_certtype(
-                    mode)
-            certificate = \
-                pecan.request.dbapi.certificate_update(certificate.uuid,
-                                                       values)
-        except exception.CertificateTypeNotFound:
-            certificate = pecan.request.dbapi.certificate_create(values)
-            pass
-
-        sp_certificates_dict = certificate.as_dict()
-
-        LOG.debug("certificate_install sp_certificates={}".format(
-            sp_certificates_dict))
+                certificate = pecan.request.dbapi.certificate_create(values)
+            certificate_dict = certificate.as_dict()
+            LOG.debug("certificate_install certificate={}".format(
+                certificate_dict))
+            certificate_dicts.append(certificate_dict)

         log_end = cutils.timestamped("certificate_do_post_end")
         LOG.info("certificate %s" % log_end)

         return dict(success="", error="", body="",
-                    certificates=sp_certificates_dict)
+                    certificates=certificate_dicts)

     @cutils.synchronized(LOCK_NAME)
+    @wsme_pecan.wsexpose(Certificate, types.uuid, status_code=200)
+    def delete(self, certificate_uuid):
+        """Uninstall a certificate."""
+
+        # Only support ssl_ca cert type
+        log_start = cutils.timestamped("certificate_do_delete_start")
+
+        try:
+            certificate = pecan.request.dbapi.certificate_get(certificate_uuid)
+        except exception.InvalidParameterValue:
+            raise wsme.exc.ClientSideError(
+                _("No certificate found for %s" % certificate_uuid))
+
+        if certificate and \
+                certificate.certtype not in [constants.CERT_MODE_SSL_CA]:
+            msg = "Unsupported mode: {}".format(certificate.certtype)
+            raise wsme.exc.ClientSideError(_(msg))
+
+        LOG.info("certificate %s certificate_uuid=%s" %
+                 (log_start, certificate_uuid))
+
+        try:
+            pecan.request.rpcapi.delete_certificate(pecan.request.context,
+                                                    certificate.certtype,
+                                                    certificate.signature)
+        except RemoteError as e:
+            msg = "Exception occurred e={}".format(e)
+            LOG.warn(msg)
+            raise wsme.exc.ClientSideError(
+                _("Failed to delete the certificate: %s, %s" %
+                  (certificate_uuid, str(e.value))))
+
+        pecan.request.dbapi.certificate_destroy(certificate_uuid)
+
+        log_end = cutils.timestamped("certificate_do_delete_end")
+        LOG.info("certificate %s" % log_end)
+
+        return Certificate.convert_with_links(certificate)


 def _check_endpoint_domain_exists():
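The controller now delegates PEM parsing to cutils.extract_certs_from_pem(), which is not shown in this diff. A minimal sketch of what such a helper might look like, using the cryptography package (the real implementation may differ):

    from cryptography import x509
    from cryptography.hazmat.backends import default_backend

    BEGIN = b'-----BEGIN CERTIFICATE-----'
    END = b'-----END CERTIFICATE-----'

    def extract_certs_from_pem(pem_contents):
        """Return all x509 certificates found in a PEM blob."""
        certs = []
        start = 0
        while True:
            begin = pem_contents.find(BEGIN, start)
            if begin < 0:
                break
            end = pem_contents.find(END, begin) + len(END)
            certs.append(x509.load_pem_x509_certificate(
                pem_contents[begin:end], default_backend()))
            start = end
        return certs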
@@ -17,6 +17,8 @@ import wsme
 from wsme import types as wtypes
 import wsmeext.pecan as wsme_pecan

+from oslo_db.exception import DBDuplicateEntry
+from oslo_db.exception import DBError
 from oslo_log import log
 from sysinv._i18n import _
 from sysinv.api.controllers.v1 import base

@@ -27,8 +29,6 @@ from sysinv.api.controllers.v1 import utils as api_utils
 from sysinv.common import exception
 from sysinv.common import utils as cutils
 from sysinv import objects
-from sysinv.openstack.common.db.exception import DBDuplicateEntry
-from sysinv.openstack.common.db.exception import DBError

 LOG = log.getLogger(__name__)
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2019 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
 #
@@ -257,21 +257,9 @@ def _check_controller_multi_fs(controller_fs_new_list,
                                    rootfs_configured_size_GiB)

     if cgtsvg_growth_gib and (cgtsvg_growth_gib > cgtsvg_max_free_GiB):
-        if ceph_mon_gib_new:
-            msg = _(
-                "Total target growth size %s GiB for database "
-                "(doubled for upgrades), platform, "
-                "scratch, backup, extension and ceph-mon exceeds "
-                "growth limit of %s GiB." %
-                (cgtsvg_growth_gib, cgtsvg_max_free_GiB)
-            )
-        else:
-            msg = _(
-                "Total target growth size %s GiB for database "
-                "(doubled for upgrades), platform, scratch, "
-                "backup and extension exceeds growth limit of %s GiB." %
-                (cgtsvg_growth_gib, cgtsvg_max_free_GiB)
-            )
+        msg = _("Total target growth size %s GiB "
+                "exceeds growth limit of %s GiB." %
+                (cgtsvg_growth_gib, cgtsvg_max_free_GiB))
         raise wsme.exc.ClientSideError(msg)

@@ -373,8 +361,8 @@ def _get_controller_cgtsvg_limit():
             if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
                     ilvg.lvm_vg_size and ilvg.lvm_vg_total_pe):
                 cgtsvg0_free_mib = (int(ilvg.lvm_vg_size) *
-                                    int(ilvg.lvm_vg_free_pe) / int(
-                                        ilvg.lvm_vg_total_pe)) / (1024 * 1024)
+                                    int(ilvg.lvm_vg_free_pe) // int(
+                                        ilvg.lvm_vg_total_pe)) // (1024 * 1024)
                 break

     else:

@@ -391,22 +379,22 @@ def _get_controller_cgtsvg_limit():
             if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
                     ilvg.lvm_vg_size and ilvg.lvm_vg_total_pe):
                 cgtsvg1_free_mib = (int(ilvg.lvm_vg_size) *
-                                    int(ilvg.lvm_vg_free_pe) / int(
-                                        ilvg.lvm_vg_total_pe)) / (1024 * 1024)
+                                    int(ilvg.lvm_vg_free_pe) // int(
+                                        ilvg.lvm_vg_total_pe)) // (1024 * 1024)
                 break

     LOG.info("_get_controller_cgtsvg_limit cgtsvg0_free_mib=%s, "
              "cgtsvg1_free_mib=%s" % (cgtsvg0_free_mib, cgtsvg1_free_mib))

     if cgtsvg0_free_mib > 0 and cgtsvg1_free_mib > 0:
-        cgtsvg_max_free_GiB = min(cgtsvg0_free_mib, cgtsvg1_free_mib) / 1024
+        cgtsvg_max_free_GiB = min(cgtsvg0_free_mib, cgtsvg1_free_mib) // 1024
         LOG.info("min of cgtsvg0_free_mib=%s and cgtsvg1_free_mib=%s is "
                  "cgtsvg_max_free_GiB=%s" %
                  (cgtsvg0_free_mib, cgtsvg1_free_mib, cgtsvg_max_free_GiB))
     elif cgtsvg1_free_mib > 0:
-        cgtsvg_max_free_GiB = cgtsvg1_free_mib / 1024
+        cgtsvg_max_free_GiB = cgtsvg1_free_mib // 1024
     else:
-        cgtsvg_max_free_GiB = cgtsvg0_free_mib / 1024
+        cgtsvg_max_free_GiB = cgtsvg0_free_mib // 1024

     LOG.info("SYS_I filesystem limits cgtsvg0_free_mib=%s, "
              "cgtsvg1_free_mib=%s, cgtsvg_max_free_GiB=%s"

@@ -462,7 +450,7 @@ def _check_controller_multi_fs_data(context, controller_fs_list_new):
             orig = int(float(lvdisplay_dict[lv]))
             new = int(fs.size)
             if fs.name == constants.FILESYSTEM_NAME_DATABASE:
-                orig = orig / 2
+                orig = orig // 2

             if orig > new:
                 raise wsme.exc.ClientSideError(_("'%s' must be at least: "
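The '/' to '//' substitutions above are a Python 3 compatibility fix: under Python 3, '/' on integers yields a float, while '//' preserves the integer (floor) semantics that these size calculations expect. For example:

    print(7 / 2)     # 3.5 under Python 3 (was 3 under Python 2)
    print(7 // 2)    # 3 under both Python 2 and Python 3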
@@ -143,7 +143,7 @@ class DNSCollection(collection.Collection):
 ##############
 # UTILS
 ##############
-def _check_dns_data(dns):
+def _check_dns_data(dns, ip_family):
     # Get data
     nameservers = dns['nameservers']
     idns_nameservers_list = []

@@ -157,20 +157,25 @@ def _check_dns_data(dns):
     ntp_list = pecan.request.dbapi.intp_get_by_isystem(dns['isystem_uuid'])

     if nameservers:
-        for nameservers in [n.strip() for n in nameservers.split(',')]:
+        for nameserver in [n.strip() for n in nameservers.split(',')]:
             # Semantic check each server as IP
             try:
-                idns_nameservers_list.append(str(IPAddress(nameservers)))
+                idns_nameservers_list.append(str(IPAddress(nameserver)))
+                if ip_family and IPAddress(nameserver).version != ip_family:
+                    raise wsme.exc.ClientSideError(_(
+                        "IP version mismatch: was expecting "
+                        "IPv%d, IPv%d received") % (ip_family,
+                                                    IPAddress(nameserver).version))
             except (AddrFormatError, ValueError):

-                if nameservers == 'NC':
+                if nameserver == 'NC':
                     idns_nameservers_list.append(str(""))
                     break

                 raise wsme.exc.ClientSideError(_(
                     "Invalid DNS nameserver target address %s "
                     "Please configure a valid DNS "
-                    "address.") % (nameservers))
+                    "address.") % (nameserver))

     if len(idns_nameservers_list) == 0 or idns_nameservers_list == [""]:
         if ntp_list:

@@ -336,8 +341,16 @@ class DNSController(rest.RestController):
         except utils.JSONPATCH_EXCEPTIONS as e:
             raise exception.PatchError(patch=patch, reason=e)

-        LOG.warn("dns %s" % dns.as_dict())
-        dns = _check_dns_data(dns.as_dict())
+        # Since dns requests on the controller go over the oam network,
+        # check the ip version of the oam address pool in the database
+        oam_network = pecan.request.dbapi.network_get_by_type(
+            constants.NETWORK_TYPE_OAM)
+        oam_address_pool = pecan.request.dbapi.address_pool_get(
+            oam_network.pool_uuid)
+        ip_family = oam_address_pool.family
+
+        LOG.info("dns %s; ip_family: ipv%d" % (dns.as_dict(), ip_family))
+        dns = _check_dns_data(dns.as_dict(), ip_family)

         try:
             # Update only the fields that have changed
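The new ip_family check relies on netaddr's IPAddress.version. Illustrative values only:

    from netaddr import IPAddress

    oam_family = 4   # in the code above this comes from the OAM address pool
    for ns in ['8.8.8.8', '2001:4860:4860::8888']:
        version = IPAddress(ns).version
        if version != oam_family:
            print('reject %s: expected IPv%d, got IPv%d'
                  % (ns, oam_family, version))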
@@ -3343,6 +3343,22 @@ class HostController(rest.RestController):

         self._check_sriovdp_interface_datanets(interface)

+    def _semantic_check_unlock_kube_upgrade(self, ihost, force_unlock=False):
+        """
+        Perform semantic checks related to kubernetes upgrades prior to unlocking host.
+        """
+        if force_unlock:
+            LOG.warning("Host %s force unlock while kubelet upgrade "
+                        "in progress." % ihost['hostname'])
+            return
+
+        kube_host_upgrade = \
+            pecan.request.dbapi.kube_host_upgrade_get_by_host(ihost['uuid'])
+        if kube_host_upgrade.status == kubernetes.KUBE_HOST_UPGRADING_KUBELET:
+            msg = _("Can not unlock %s while upgrading kubelet. "
+                    "Wait for kubelet upgrade to complete." % ihost['hostname'])
+            raise wsme.exc.ClientSideError(msg)
+
     def _semantic_check_unlock_upgrade(self, ihost, force_unlock=False):
         """
         Perform semantic checks related to upgrades prior to unlocking host.

@@ -5400,6 +5416,7 @@ class HostController(rest.RestController):
         """Pre unlock semantic checks for controller"""
         LOG.info("%s ihost check_unlock_controller" % hostupdate.displayid)
         self._semantic_check_unlock_upgrade(hostupdate.ihost_orig, force_unlock)
+        self._semantic_check_unlock_kube_upgrade(hostupdate.ihost_orig, force_unlock)
         self._semantic_check_oam_interface(hostupdate.ihost_orig)
         self._semantic_check_cinder_volumes(hostupdate.ihost_orig)
         self._semantic_check_filesystem_sizes(hostupdate.ihost_orig)

@@ -5418,6 +5435,10 @@ class HostController(rest.RestController):
                 "configure host and wait for Availability State "
                 "'online' prior to unlock." % hostupdate.displayid))

+        # Check if kubernetes upgrade is in-progress
+        if cutils.is_std_system(pecan.request.dbapi):
+            self._semantic_check_unlock_kube_upgrade(hostupdate.ihost_orig, force_unlock)
+
         # Check whether a restore was properly completed
         self._semantic_check_restore_complete(ihost)
         # Disable certain worker unlock checks in a kubernetes config

@@ -6618,6 +6639,12 @@ class HostController(rest.RestController):
         cp_versions = self._kube_operator.kube_get_control_plane_versions()
         current_cp_version = cp_versions.get(host_obj.hostname)
         if current_cp_version == kube_upgrade_obj.to_version:
+            # Make sure we are not attempting to upgrade the first upgraded
+            # control plane again after networking was upgraded
+            if kube_upgrade_obj.state == kubernetes.KUBE_UPGRADED_NETWORKING:
+                raise wsme.exc.ClientSideError(_(
+                    "The first control plane was already upgraded."))
+
             # The control plane was already upgraded, but we didn't progress
             # to the next state, so something failed along the way.
             LOG.info("Redoing kubernetes control plane upgrade for %s" %
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019 Wind River Systems, Inc.
+# Copyright (c) 2020 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
|
|||
|
||||
|
||||
class HostFs(base.APIBase):
|
||||
"""API representation of a ilvg.
|
||||
"""API representation of a host_fs.
|
||||
|
||||
This class enforces type checking and value constraints, and converts
|
||||
between the internal object model and the API representation of
|
||||
|
@ -154,7 +154,7 @@ class HostFsController(rest.RestController):
|
|||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.lvg.get_by_uuid(
|
||||
marker_obj = objects.host_fs.get_by_uuid(
|
||||
pecan.request.context,
|
||||
marker)
|
||||
|
||||
|
@ -208,7 +208,7 @@ class HostFsController(rest.RestController):
|
|||
if self._from_ihosts:
|
||||
raise exception.OperationNotPermitted
|
||||
|
||||
rpc_host_fs = objects.lvg.get_by_uuid(pecan.request.context,
|
||||
rpc_host_fs = objects.host_fs.get_by_uuid(pecan.request.context,
|
||||
host_fs_uuid)
|
||||
return HostFs.convert_with_links(rpc_host_fs)
|
||||
|
||||
|
@ -326,7 +326,7 @@ class HostFsController(rest.RestController):
|
|||
filesystem_list=modified_fs,)
|
||||
|
||||
except Exception as e:
|
||||
msg = _("Failed to update filesystem size for %s" % host.name)
|
||||
msg = _("Failed to update filesystem size for %s" % host.hostname)
|
||||
LOG.error("%s with patch %s with exception %s" % (msg, patch, e))
|
||||
raise wsme.exc.ClientSideError(msg)
|
||||
|
||||
|
|
|
@@ -68,7 +68,8 @@ VALID_NETWORK_TYPES = [constants.NETWORK_TYPE_NONE,
                        constants.NETWORK_TYPE_DATA,
                        constants.NETWORK_TYPE_PCI_PASSTHROUGH,
                        constants.NETWORK_TYPE_PCI_SRIOV,
-                       constants.NETWORK_TYPE_IRONIC]
+                       constants.NETWORK_TYPE_IRONIC,
+                       constants.NETWORK_TYPE_STORAGE]

 VALID_INTERFACE_CLASS = [constants.INTERFACE_CLASS_PLATFORM,
                          constants.INTERFACE_CLASS_DATA,

@@ -466,21 +467,25 @@ class InterfaceController(rest.RestController):
         _check_interface_mtu(temp_interface.as_dict(), ihost)

         # Check SR-IOV before updating the ports
+        sriov_numvfs = None
+        sriov_vf_driver = None
         for p in patch:
             if '/ifclass' == p['path']:
                 temp_interface['ifclass'] = p['value']
             elif '/sriov_numvfs' == p['path']:
-                temp_interface['sriov_numvfs'] = p['value']
+                sriov_numvfs = p['value']
+                temp_interface['sriov_numvfs'] = sriov_numvfs
             elif '/sriov_vf_driver' == p['path']:
-                temp_interface['sriov_vf_driver'] = p['value']
+                sriov_vf_driver = p['value']
+                temp_interface['sriov_vf_driver'] = sriov_vf_driver

-        # If network type is not pci-sriov, reset the sriov-numvfs to zero
-        if (temp_interface['sriov_numvfs'] is not None and
-                temp_interface['ifclass'] is not None and
-                temp_interface[
-                    'ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV):
-            temp_interface['sriov_numvfs'] = None
-            temp_interface['sriov_vf_driver'] = None
+        # If the interface class is no longer pci-sriov, reset the VF
+        # parameters if they haven't been specified in the patch
+        if temp_interface['ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV:
+            if sriov_numvfs is None:
+                temp_interface['sriov_numvfs'] = 0
+            if sriov_vf_driver is None:
+                temp_interface['sriov_vf_driver'] = None

         sriov_update = _check_interface_sriov(temp_interface.as_dict(), ihost)

@@ -550,6 +555,11 @@ class InterfaceController(rest.RestController):
                 ports=ports, ifaces=uses,
                 existing_interface=rpc_interface.as_dict())

+        # Clear the vf fields if class is not sriov
+        if interface['ifclass'] != constants.INTERFACE_CLASS_PCI_SRIOV:
+            interface["sriov_numvfs"] = 0
+            interface["sriov_vf_driver"] = None
+
         if uses:
             # Update MAC address if uses list changed
             interface = set_interface_mac(ihost, interface)

@@ -842,17 +852,8 @@ def _check_interface_sriov(interface, ihost, from_profile=False):


 def _check_host(ihost):
-    if utils.is_aio_simplex_host_unlocked(ihost):
+    if ihost['administrative'] != constants.ADMIN_LOCKED:
         raise wsme.exc.ClientSideError(_("Host must be locked."))
-    elif ihost['administrative'] != 'locked' and not \
-            utils.is_host_simplex_controller(ihost):
-        unlocked = False
-        current_ihosts = pecan.request.dbapi.ihost_get_list()
-        for h in current_ihosts:
-            if h['administrative'] != 'locked' and h['hostname'] != ihost['hostname']:
-                unlocked = True
-        if unlocked:
-            raise wsme.exc.ClientSideError(_("Host must be locked."))


 def _check_interface_class_and_host_type(ihost, interface):

@@ -1949,6 +1950,5 @@ def _is_interface_address_allowed(interface):
     elif interface['ifclass'] == constants.INTERFACE_CLASS_DATA:
         return True
     elif interface['ifclass'] == constants.INTERFACE_CLASS_PLATFORM:
-        if any(nt in address.ALLOWED_NETWORK_TYPES for nt in interface['networktypelist'] or []):
-            return True
+        return True
     return False
@ -51,7 +51,8 @@ NONASSIGNABLE_NETWORK_TYPES = (constants.NETWORK_TYPE_DATA,
|
|||
NONDUPLICATE_NETWORK_TYPES = (constants.NETWORK_TYPE_MGMT,
|
||||
constants.NETWORK_TYPE_OAM,
|
||||
constants.NETWORK_TYPE_CLUSTER_HOST,
|
||||
constants.NETWORK_TYPE_PXEBOOT)
|
||||
constants.NETWORK_TYPE_PXEBOOT,
|
||||
constants.NETWORK_TYPE_STORAGE)
|
||||
|
||||
|
||||
class InterfaceNetwork(base.APIBase):
|
||||
|
@ -413,6 +414,8 @@ def _update_host_address(host, interface, network_type):
|
|||
_update_host_cluster_address(host, interface)
|
||||
elif network_type == constants.NETWORK_TYPE_IRONIC:
|
||||
_update_host_ironic_address(host, interface)
|
||||
elif network_type == constants.NETWORK_TYPE_STORAGE:
|
||||
_update_host_storage_address(host, interface)
|
||||
if host.personality == constants.CONTROLLER:
|
||||
if network_type == constants.NETWORK_TYPE_OAM:
|
||||
_update_host_oam_address(host, interface)
|
||||
|
@ -501,6 +504,23 @@ def _update_host_ironic_address(host, interface):
|
|||
pecan.request.dbapi.address_update(address.uuid, updates)
|
||||
|
||||
|
||||
def _update_host_storage_address(host, interface):
|
||||
address_name = cutils.format_address_name(host.hostname,
|
||||
constants.NETWORK_TYPE_STORAGE)
|
||||
try:
|
||||
address = pecan.request.dbapi.address_get_by_name(address_name)
|
||||
updates = {'interface_id': interface['id']}
|
||||
pecan.request.dbapi.address_update(address.uuid, updates)
|
||||
except exception.AddressNotFoundByName:
|
||||
# For non-controller hosts, allocate address from pool if dynamic
|
||||
storage_network = pecan.request.dbapi.network_get_by_type(
|
||||
constants.NETWORK_TYPE_STORAGE)
|
||||
if storage_network.dynamic:
|
||||
_allocate_pool_address(interface['id'],
|
||||
storage_network.pool_uuid,
|
||||
address_name)
|
||||
|
||||
|
||||
def _update_host_mgmt_mac(host, mgmt_mac):
|
||||
"""Update host mgmt mac to reflect interface change.
|
||||
"""
|
||||
|
|
|
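_update_host_storage_address() looks up a per-host address by name. The naming convention is assumed here to be "<hostname>-<network-type>"; this stand-in for cutils.format_address_name is hypothetical:

    def format_address_name(hostname, network_type):
        # hypothetical stand-in for cutils.format_address_name
        return '%s-%s' % (hostname, network_type)

    print(format_address_name('controller-0', 'storage'))  # controller-0-storage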
@@ -24,6 +24,7 @@ from sysinv.api.controllers.v1 import types
 from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common import utils as cutils
+from sysinv.common import kubernetes
 from sysinv.helm import common as helm_common

 import cgcs_patch.constants as patch_constants

@@ -437,6 +438,14 @@ class KubeAppController(rest.RestController):
         else:
             mode = values['mode']

+        try:
+            app_helper = KubeAppHelper(pecan.request.dbapi)
+            app_helper._check_app_compatibility(db_app.name,
+                                                db_app.app_version)
+        except exception.IncompatibleKubeVersion as e:
+            raise wsme.exc.ClientSideError(_(
+                "Application-apply rejected: " + str(e)))
+
         self._semantic_check(db_app)

         if db_app.status == constants.APP_APPLY_IN_PROGRESS:

@@ -596,6 +605,7 @@ class KubeAppHelper(object):

     def __init__(self, dbapi):
         self._dbapi = dbapi
+        self._kube_operator = kubernetes.KubeOperator()

     def _check_patching_operation(self):
         try:

@@ -652,6 +662,25 @@ class KubeAppHelper(object):
                 "Error while reporting the patch dependencies "
                 "to patch-controller.")

+    def _check_app_compatibility(self, app_name, app_version):
+        """Checks whether the application is compatible
+           with the current k8s version"""
+
+        kube_min_version, kube_max_version = \
+            cutils.get_app_supported_kube_version(app_name, app_version)
+
+        if not kube_min_version and not kube_max_version:
+            return
+
+        version_states = self._kube_operator.kube_get_version_states()
+        for kube_version, state in version_states.items():
+            if state in [kubernetes.KUBE_STATE_ACTIVE,
+                         kubernetes.KUBE_STATE_PARTIAL]:
+                if not kubernetes.is_kube_version_supported(
+                        kube_version, kube_min_version, kube_max_version):
+                    raise exception.IncompatibleKubeVersion(
+                        name=app_name, version=app_version, kube_version=kube_version)
+
     def _find_manifest_file(self, app_path):
         # It is expected that there is only one manifest file
         # per application and the file exists at top level of
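_check_app_compatibility() hinges on kubernetes.is_kube_version_supported(), which is not shown in this diff. A sketch of the version-window test it presumably performs (the real helper may normalize versions differently):

    def is_kube_version_supported(kube_version, min_version=None, max_version=None):
        def key(v):
            return tuple(int(p) for p in v.lstrip('v').split('.'))
        if min_version and key(kube_version) < key(min_version):
            return False
        if max_version and key(kube_version) > key(max_version):
            return False
        return True

    print(is_kube_version_supported('v1.18.1', 'v1.16.0', 'v1.18.1'))  # True
    print(is_kube_version_supported('v1.19.0', 'v1.16.0', 'v1.18.1'))  # False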
@@ -148,6 +148,24 @@ class KubeUpgradeController(rest.RestController):
                 "the kubernetes upgrade: %s" %
                 available_patches))

+    @staticmethod
+    def _check_installed_apps_compatibility(apps, kube_version):
+        """Checks whether all installed applications are compatible
+           with the new k8s version"""
+
+        for app in apps:
+            if app.status != constants.APP_APPLY_SUCCESS:
+                continue
+
+            kube_min_version, kube_max_version = \
+                cutils.get_app_supported_kube_version(app.name, app.app_version)
+
+            if not kubernetes.is_kube_version_supported(
+                    kube_version, kube_min_version, kube_max_version):
+                raise wsme.exc.ClientSideError(_(
+                    "The installed Application %s (%s) is incompatible with the "
+                    "new Kubernetes version %s." % (app.name, app.app_version, kube_version)))
+
     @wsme_pecan.wsexpose(KubeUpgradeCollection)
     def get_all(self):
         """Retrieve a list of kubernetes upgrades."""

@@ -221,7 +239,10 @@ class KubeUpgradeController(rest.RestController):
             applied_patches=target_version_obj.applied_patches,
             available_patches=target_version_obj.available_patches)

-        # TODO: check that all installed applications support new k8s version
+        # Check that all installed applications support new k8s version
+        apps = pecan.request.dbapi.kube_app_get_all()
+        self._check_installed_apps_compatibility(apps, to_version)
+
         # TODO: check that tiller/armada support new k8s version

         # The system must be healthy
@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-# Copyright (c) 2013-2017 Wind River Systems, Inc.
+# Copyright (c) 2013-2020 Wind River Systems, Inc.
 #

 import jsonpatch

@@ -147,7 +147,7 @@ class LVG(base.APIBase):
         # lvm_vg_total_pe is Volume Group's total Physical Extents
         if lvg.lvm_vg_total_pe and lvg.lvm_vg_total_pe > 0:
             lvg.lvm_vg_avail_size = \
-                lvg.lvm_vg_size * lvg.lvm_vg_free_pe / lvg.lvm_vg_total_pe
+                lvg.lvm_vg_size * lvg.lvm_vg_free_pe // lvg.lvm_vg_total_pe
         else:
             lvg.lvm_vg_avail_size = 0
@@ -51,6 +51,7 @@ ALLOWED_NETWORK_TYPES = [constants.NETWORK_TYPE_MGMT,
                          constants.NETWORK_TYPE_CLUSTER_SERVICE,
                          constants.NETWORK_TYPE_IRONIC,
                          constants.NETWORK_TYPE_SYSTEM_CONTROLLER_OAM,
+                         constants.NETWORK_TYPE_STORAGE,
                          ]


@@ -184,7 +185,8 @@ class NetworkController(rest.RestController):
             addresses = self._create_ironic_network_address()
         elif network['type'] == constants.NETWORK_TYPE_SYSTEM_CONTROLLER:
             addresses = self._create_system_controller_network_address(pool)
-
+        elif network['type'] == constants.NETWORK_TYPE_STORAGE:
+            addresses = self._create_storage_network_address()
         else:
             return
         self._populate_network_addresses(pool, network, addresses)

@@ -261,6 +263,13 @@ class NetworkController(rest.RestController):
         addresses[constants.CONTROLLER_1_HOSTNAME] = None
         return addresses

+    def _create_storage_network_address(self):
+        addresses = collections.OrderedDict()
+        addresses[constants.CONTROLLER_HOSTNAME] = None
+        addresses[constants.CONTROLLER_0_HOSTNAME] = None
+        addresses[constants.CONTROLLER_1_HOSTNAME] = None
+        return addresses
+
     def _populate_network_addresses(self, pool, network, addresses):
         opt_fields = {}
         for name, address in addresses.items():

@@ -368,7 +377,8 @@ class NetworkController(rest.RestController):
                                  constants.NETWORK_TYPE_CLUSTER_HOST,
                                  constants.NETWORK_TYPE_PXEBOOT,
                                  constants.NETWORK_TYPE_CLUSTER_POD,
-                                 constants.NETWORK_TYPE_CLUSTER_SERVICE]:
+                                 constants.NETWORK_TYPE_CLUSTER_SERVICE,
+                                 constants.NETWORK_TYPE_STORAGE]:
             msg = _("Cannot delete type {} network {} after initial "
                     "configuration completion"
                     .format(network['type'], network_uuid))
@@ -56,7 +56,7 @@ from sysinv.common import utils as cutils
 import xml.etree.ElementTree as et
 from lxml import etree
 from sysinv.api.controllers.v1 import profile_utils
-from sysinv.openstack.common.db import exception as dbException
+from oslo_db import exception as dbException
 from wsme import types as wtypes
 from sysinv.common.storage_backend_conf import StorageBackendConfig
@@ -120,6 +120,7 @@ class PTPController(rest.RestController):

     _custom_actions = {
         'detail': ['GET'],
+        'apply': ['POST']
     }

     def _get_ptps_collection(self, marker, limit, sort_key, sort_dir,

@@ -257,3 +258,13 @@ class PTPController(rest.RestController):
     def delete(self, ptp_uuid):
         """Delete a ptp."""
         raise exception.OperationNotPermitted
+
+    @cutils.synchronized(LOCK_NAME)
+    @wsme_pecan.wsexpose(None, status_code=204)
+    def apply(self):
+        """Apply the ptp configuration."""
+        try:
+            pecan.request.rpcapi.update_ptp_config(pecan.request.context, do_apply=True)
+        except exception.HTTPNotFound:
+            msg = _("PTP apply failed")
+            raise wsme.exc.ClientSideError(msg)
@@ -237,7 +237,8 @@ class ServiceParameterController(rest.RestController):
         schema = service_parameter.SERVICE_PARAMETER_SCHEMA[service][section]
         parameters = (schema.get(service_parameter.SERVICE_PARAM_MANDATORY, []) +
                       schema.get(service_parameter.SERVICE_PARAM_OPTIONAL, []))
-        if name not in parameters:
+        has_wildcard = (constants.SERVICE_PARAM_NAME_WILDCARD in parameters)
+        if name not in parameters and not has_wildcard:
             msg = _("The parameter name %s is invalid for "
                     "service %s section %s"
                     % (name, service, section))

@@ -649,6 +650,53 @@ class ServiceParameterController(rest.RestController):
             raise wsme.exc.ClientSideError(
                 _("Host %s must be unlocked and enabled." % host_id))

+    @staticmethod
+    def _service_parameter_apply_semantic_check_kubernetes():
+        """Semantic checks for the Platform Kubernetes Service Type """
+        try:
+            oidc_issuer_url = pecan.request.dbapi.service_parameter_get_one(
+                service=constants.SERVICE_TYPE_KUBERNETES,
+                section=constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
+                name=constants.SERVICE_PARAM_NAME_OIDC_ISSUER_URL)
+        except exception.NotFound:
+            oidc_issuer_url = None
+
+        try:
+            oidc_client_id = pecan.request.dbapi.service_parameter_get_one(
+                service=constants.SERVICE_TYPE_KUBERNETES,
+                section=constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
+                name=constants.SERVICE_PARAM_NAME_OIDC_CLIENT_ID)
+        except exception.NotFound:
+            oidc_client_id = None
+
+        try:
+            oidc_username_claim = pecan.request.dbapi.service_parameter_get_one(
+                service=constants.SERVICE_TYPE_KUBERNETES,
+                section=constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
+                name=constants.SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM)
+        except exception.NotFound:
+            oidc_username_claim = None
+
+        try:
+            oidc_groups_claim = pecan.request.dbapi.service_parameter_get_one(
+                service=constants.SERVICE_TYPE_KUBERNETES,
+                section=constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
+                name=constants.SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM)
+        except exception.NotFound:
+            oidc_groups_claim = None
+
+        if not ((not oidc_issuer_url and not oidc_client_id and
+                 not oidc_username_claim and not oidc_groups_claim) or
+                (oidc_issuer_url and oidc_client_id and
+                 oidc_username_claim and not oidc_groups_claim) or
+                (oidc_issuer_url and oidc_client_id and
+                 oidc_username_claim and oidc_groups_claim)):
+            msg = _("Unable to apply service parameters. Please choose one of "
+                    "the valid Kubernetes OIDC parameter setups: (None) or "
+                    "(oidc_issuer_url, oidc_client_id, oidc_username_claim) or "
+                    "(the previous 3 plus oidc_groups_claim)")
+            raise wsme.exc.ClientSideError(msg)
+
     def _service_parameter_apply_semantic_check(self, service):
         """Semantic checks for the service-parameter-apply command """

@@ -669,9 +717,12 @@ class ServiceParameterController(rest.RestController):
         if service == constants.SERVICE_TYPE_PLATFORM:
             self._service_parameter_apply_semantic_check_mtce()

-        if service == constants.SERVICE_TYPE_HTTP:
+        elif service == constants.SERVICE_TYPE_HTTP:
             self._service_parameter_apply_semantic_check_http()

+        elif service == constants.SERVICE_TYPE_KUBERNETES:
+            self._service_parameter_apply_semantic_check_kubernetes()
+
     def _get_service(self, body):
         service = body.get('service') or ""
         if not service:
@@ -492,6 +492,34 @@ def _discover_and_validate_rbd_provisioner_capabilities(caps_dict, storage_ceph)
         raise wsme.exc.ClientSideError(msg)


+def _create_default_ceph_db_entries():
+    try:
+        isystem = pecan.request.dbapi.isystem_get_one()
+    except exception.NotFound:
+        # When adding the backend, the system DB entry should
+        # have already been created, but it's safer to just check
+        LOG.info('System is not configured. Cannot create Cluster '
+                 'DB entry')
+        return
+    LOG.info("Create new ceph cluster record")
+    # Create the default primary cluster
+    db_cluster = pecan.request.dbapi.cluster_create(
+        {'uuid': uuidutils.generate_uuid(),
+         'cluster_uuid': None,
+         'type': constants.SB_TYPE_CEPH,
+         'name': 'ceph_cluster',
+         'system_id': isystem.id})
+
+    # Create the default primary ceph storage tier
+    LOG.info("Create primary ceph tier record.")
+    pecan.request.dbapi.storage_tier_create(
+        {'forclusterid': db_cluster.id,
+         'name': constants.SB_TIER_DEFAULT_NAMES[constants.SB_TIER_TYPE_CEPH],
+         'type': constants.SB_TIER_TYPE_CEPH,
+         'status': constants.SB_TIER_STATUS_DEFINED,
+         'capabilities': {}})
+
+
 def _check_backend_ceph(req, storage_ceph, confirmed=False):
     # check for the backend parameters
     capabilities = storage_ceph.get('capabilities', {})
@@ -561,8 +589,21 @@ def _check_backend_ceph(req, storage_ceph, confirmed=False):
                 {'name': constants.SB_TIER_DEFAULT_NAMES[
                     constants.SB_TIER_TYPE_CEPH]})
         except exception.StorageTierNotFoundByName:
-            raise wsme.exc.ClientSideError(
-                _("Default tier not found for this backend."))
+            try:
+                # When we try to create the default storage backend
+                # it expects the default cluster and storage tier
+                # to be already created.
+                # They were initially created when conductor started,
+                # but since ceph is no longer enabled by default, we
+                # should just create it here.
+                _create_default_ceph_db_entries()
+                tier = pecan.request.dbapi.storage_tier_query(
+                    {'name': constants.SB_TIER_DEFAULT_NAMES[
+                        constants.SB_TIER_TYPE_CEPH]})
+            except Exception as e:
+                LOG.exception(e)
+                raise wsme.exc.ClientSideError(
+                    _("Error creating default ceph database entries"))
     else:
         raise wsme.exc.ClientSideError(_("No tier specified for this "
                                          "backend."))
@@ -692,7 +733,8 @@ def _check_and_update_rbd_provisioner(new_storceph, remove=False):
 def _apply_backend_changes(op, sb_obj):
     services = api_helper.getListFromServices(sb_obj.as_dict())

-    if op == constants.SB_API_OP_MODIFY:
+    if (op == constants.SB_API_OP_MODIFY or
+            op == constants.SB_API_OP_CREATE):
         if sb_obj.name == constants.SB_DEFAULT_NAMES[
                 constants.SB_TYPE_CEPH]:

@@ -820,8 +862,16 @@ def _create(storage_ceph):
     # Retrieve the main StorageBackend object.
     storage_backend_obj = pecan.request.dbapi.storage_backend_get(storage_ceph_obj.id)

-    # Enable the backend:
-    _apply_backend_changes(constants.SB_API_OP_CREATE, storage_backend_obj)
+    # Only apply runtime manifests if at least one controller is unlocked and
+    # available/degraded.
+    controller_hosts = pecan.request.dbapi.ihost_get_by_personality(
+        constants.CONTROLLER)
+    valid_controller_hosts = [h for h in controller_hosts if
+                              h['administrative'] == constants.ADMIN_UNLOCKED and
+                              h['availability'] in [constants.AVAILABILITY_AVAILABLE,
+                                                    constants.AVAILABILITY_DEGRADED]]
+    if valid_controller_hosts:
+        _apply_backend_changes(constants.SB_API_OP_CREATE, storage_backend_obj)

     return storage_ceph_obj

@@ -461,12 +461,14 @@ def get_node_cgtsvg_limit(host):
     for ilvg in ilvgs:
         if (ilvg.lvm_vg_name == constants.LVG_CGTS_VG and
                 ilvg.lvm_vg_size and ilvg.lvm_vg_total_pe):
+            # Integer division in Python 2 behaves like floating point division
+            # in Python 3. Replacing / by // rectifies this behavior.
             cgtsvg_free_mib = (int(ilvg.lvm_vg_size) * int(
                 ilvg.lvm_vg_free_pe)
-                / int(ilvg.lvm_vg_total_pe)) / (1024 * 1024)
+                // int(ilvg.lvm_vg_total_pe)) // (1024 * 1024)
             break

-    cgtsvg_max_free_gib = cgtsvg_free_mib / 1024
+    cgtsvg_max_free_gib = cgtsvg_free_mib // 1024

     LOG.info("get_node_cgtsvg_limit host=%s, cgtsvg_max_free_gib=%s"
              % (host.hostname, cgtsvg_max_free_gib))
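The operator change matters because this module must behave identically under Python 2 and Python 3; a standalone illustration:

    from __future__ import division  # gives / Python 3 semantics in Python 2

    size = 5 * 1024 * 1024
    print(size / (1024 * 1024))   # 5.0 -- true division returns a float
    print(size // (1024 * 1024))  # 5   -- floor division is an int in both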
@@ -629,13 +631,10 @@ class SBApiHelper(object):
         # TODO(oponcea): Remove this once sm supports in-service config reload
         ctrls = pecan.request.dbapi.ihost_get_by_personality(constants.CONTROLLER)
         if len(ctrls) == 1:
-            if ctrls[0].administrative == constants.ADMIN_UNLOCKED:
-                if get_system_mode() == constants.SYSTEM_MODE_SIMPLEX:
-                    msg = _("Storage backend operations require controller "
-                            "host to be locked.")
-                else:
-                    msg = _("Storage backend operations require both controllers "
-                            "to be enabled and available.")
+            if (ctrls[0].administrative == constants.ADMIN_UNLOCKED and
+                    get_system_mode() == constants.SYSTEM_MODE_DUPLEX):
+                msg = _("Storage backend operations require both controllers "
+                        "to be enabled and available.")
                 raise wsme.exc.ClientSideError(msg)
         else:
             for ctrl in ctrls:
@@ -20,16 +20,16 @@
 from oslo_config import cfg

 from sysinv.common import paths
-from sysinv.openstack.common.db.sqlalchemy import session as db_session
+from oslo_db import options as db_options
 from sysinv.openstack.common import rpc
 from sysinv import version

-_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
+_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('sysinv.sqlite')
+
+db_options.set_defaults(cfg.CONF, connection=_DEFAULT_SQL_CONNECTION)


 def parse_args(argv, default_config_files=None):
-    db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
-                            sqlite_db='sysinv.sqlite')
     rpc.set_defaults(control_exchange='sysinv')
     cfg.CONF(argv[1:],
              project='sysinv',
@@ -479,6 +479,7 @@ SB_CEPH_MON_GIB_MIN = 20
 SB_CEPH_MON_GIB_MAX = 40

 SB_CONFIGURATION_TIMEOUT = 1200
+INIT_CEPH_INFO_INTERVAL_SECS = 30

 # Ceph storage deployment model
 # Controller model: OSDs are on controllers, no storage nodes can
@@ -640,12 +641,14 @@ NETWORK_TYPE_PCI_PASSTHROUGH = 'pci-passthrough'
 NETWORK_TYPE_PCI_SRIOV = 'pci-sriov'
 NETWORK_TYPE_PXEBOOT = 'pxeboot'
 NETWORK_TYPE_IRONIC = 'ironic'
+NETWORK_TYPE_STORAGE = 'storage'

 PLATFORM_NETWORK_TYPES = [NETWORK_TYPE_PXEBOOT,
                           NETWORK_TYPE_MGMT,
                           NETWORK_TYPE_OAM,
                           NETWORK_TYPE_CLUSTER_HOST,
-                          NETWORK_TYPE_IRONIC]
+                          NETWORK_TYPE_IRONIC,
+                          NETWORK_TYPE_STORAGE]

 PCI_NETWORK_TYPES = [NETWORK_TYPE_PCI_PASSTHROUGH,
                      NETWORK_TYPE_PCI_SRIOV]
@@ -931,6 +934,12 @@ SERVICE_TYPE_DOCKER = 'docker'
 SERVICE_TYPE_HTTP = 'http'
 SERVICE_TYPE_OPENSTACK = 'openstack'
 SERVICE_TYPE_KUBERNETES = 'kubernetes'
+SERVICE_TYPE_PTP = 'ptp'
+
+# For service parameter sections that include a wildcard, any 'name' field will be
+# allowed by the API. The wildcard name will only be matched if no other matches
+# are found first.
+SERVICE_PARAM_NAME_WILDCARD = '*wildcard*'

 SERVICE_PARAM_SECTION_IDENTITY_CONFIG = 'config'

@@ -1037,6 +1046,28 @@ DEFAULT_REGISTRIES_INFO = {
 SERVICE_PARAM_SECTION_KUBERNETES_CERTIFICATES = 'certificates'
 SERVICE_PARAM_NAME_KUBERNETES_API_SAN_LIST = 'apiserver_certsan'

+SERVICE_PARAM_SECTION_KUBERNETES_APISERVER = 'kube_apiserver'
+SERVICE_PARAM_NAME_OIDC_ISSUER_URL = 'oidc_issuer_url'
+SERVICE_PARAM_NAME_OIDC_CLIENT_ID = 'oidc_client_id'
+SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM = 'oidc_username_claim'
+SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM = 'oidc_groups_claim'
+
+# ptp service parameters
+SERVICE_PARAM_SECTION_PTP_GLOBAL = 'global'
+SERVICE_PARAM_SECTION_PTP_PHC2SYS = 'phc2sys'
+SERVICE_PARAM_NAME_PTP_UPDATE_RATE = 'update-rate'
+SERVICE_PARAM_NAME_PTP_SUMMARY_UPDATES = 'summary-updates'
+
+PTP_PHC2SYS_DEFAULTS = {
+    SERVICE_PARAM_NAME_PTP_UPDATE_RATE: 10,
+    SERVICE_PARAM_NAME_PTP_SUMMARY_UPDATES: 600
+}
+
+PTP_PHC2SYS_OPTIONS_MAP = {
+    SERVICE_PARAM_NAME_PTP_UPDATE_RATE: 'R',
+    SERVICE_PARAM_NAME_PTP_SUMMARY_UPDATES: 'u'
+}
+
 # default filesystem size to 25 MB
 SERVICE_PARAM_RADOSGW_FS_SIZE_MB_DEFAULT = 25

@@ -1269,6 +1300,7 @@ DOCKER_REGISTRY_PKCS1_KEY_FILE_SHARED = os.path.join(tsc.CONFIG_PATH,
 SSL_CERT_CA_DIR = "/etc/pki/ca-trust/source/anchors/"
 SSL_CERT_CA_FILE = os.path.join(SSL_CERT_CA_DIR, CERT_CA_FILE)
 SSL_CERT_CA_FILE_SHARED = os.path.join(tsc.CONFIG_PATH, CERT_CA_FILE)
+SSL_CERT_CA_LIST_SHARED_DIR = os.path.join(tsc.CONFIG_PATH, "ssl_ca")

 KUBERNETES_PKI_SHARED_DIR = os.path.join(tsc.CONFIG_PATH, "kubernetes/pki")

@@ -1528,6 +1560,7 @@ CLOCK_SYNCHRONIZATION = [
 # PTP transport modes
 PTP_TRANSPORT_UDP = 'udp'
 PTP_TRANSPORT_L2 = 'l2'
+PTP_NETWORK_TRANSPORT_IEEE_802_3 = 'L2'

 # Backup & Restore
 FIX_INSTALL_UUID_INTERVAL_SECS = 30
@@ -1373,6 +1373,14 @@ class KubeNamespaceDeleteTimeout(SysinvException):
     message = "Namespace %(name)s deletion timeout."


+class KubePodDeleteTimeout(SysinvException):
+    message = "Pod %(namespace)s/%(name)s deletion timeout."
+
+
+class KubePodDeleteUnexpected(SysinvException):
+    message = "Pod %(namespace)s/%(name)s was unexpectedly deleted."
+
+
 class HelmTillerFailure(SysinvException):
     message = _("Helm operation failure: %(reason)s")
@@ -1398,6 +1406,11 @@ class InvalidHelmDockerImageSource(Invalid):
 class PlatformApplicationApplyFailure(SysinvException):
     message = _("Failed to apply %(name)s application.")

+class IncompatibleKubeVersion(SysinvException):
+    message = _("The application %(name)s (%(version)s) is incompatible with the current "
+                "Kubernetes version %(kube_version)s.")
+
+
 #
 # Kubernetes related exceptions
 #
@@ -66,7 +66,7 @@ KUBE_HOST_UPGRADING_KUBELET_FAILED = 'upgrading-kubelet-failed'
 # Kubernetes constants
 MANIFEST_APPLY_TIMEOUT = 60 * 15
 MANIFEST_APPLY_INTERVAL = 10
-POD_START_TIMEOUT = 60
+POD_START_TIMEOUT = 60 * 2
 POD_START_INTERVAL = 10


@@ -82,6 +82,21 @@ def get_kube_versions():
     ]


+def is_kube_version_supported(kube_version, min_version=None, max_version=None):
+    """Check if the k8s version is supported by the application.
+
+    :param kube_version: the running or target k8s version
+    :param min_version (optional): minimum k8s version supported by the app
+    :param max_version (optional): maximum k8s version supported by the app
+
+    :returns bool: True if k8s version is supported
+    """
+    if ((min_version is not None and LooseVersion(kube_version) < LooseVersion(min_version)) or
+            (max_version is not None and LooseVersion(kube_version) > LooseVersion(max_version))):
+        return False
+    return True
+
+
 def get_kube_networking_upgrade_version(kube_upgrade):
     """Determine the version that kubernetes networking
     should be upgraded to."""
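is_kube_version_supported leans on LooseVersion for component-wise comparison; a quick standalone check of why a plain string comparison would not do:

    from distutils.version import LooseVersion

    # Lexicographically '1.18.1' sorts before '1.9.9'; LooseVersion
    # compares the numeric components instead.
    assert '1.18.1' < '1.9.9'
    assert LooseVersion('1.18.1') > LooseVersion('1.9.9')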
@@ -234,14 +249,22 @@ class KubeOperator(object):
                       "kube_get_namespace %s: %s" % (namespace, e))
             raise

+    def kube_get_namespace_name_list(self):
+        c = self._get_kubernetesclient_core()
+        try:
+            ns_list = c.list_namespace()
+            return list(set(ns.metadata.name for ns in ns_list.items))
+        except Exception as e:
+            LOG.error("Failed to get Namespace list: %s" % e)
+            raise
+
     def kube_get_secret(self, name, namespace):
         c = self._get_kubernetesclient_core()
         try:
-            c.read_namespaced_secret(name, namespace)
-            return True
+            return c.read_namespaced_secret(name, namespace)
         except ApiException as e:
             if e.status == httplib.NOT_FOUND:
-                return False
+                return None
             else:
                 LOG.error("Failed to get Secret %s under "
                           "Namespace %s: %s" % (name, namespace, e.body))
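Since kube_get_secret now returns the V1Secret object or None instead of True/False, callers must test identity rather than truthiness; a hedged sketch of the new calling pattern (names illustrative):

    secret = kube_op.kube_get_secret('registry-local-secret', 'kube-system')
    if secret is None:
        pass  # 404 from the API server: the secret is absent
    else:
        data = secret.data['.dockerconfigjson']  # full V1Secret available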
@@ -270,6 +293,15 @@ class KubeOperator(object):
                       "%s: %s" % (name, src_namespace, dst_namespace, e))
             raise

+    def kube_patch_secret(self, name, namespace, body):
+        c = self._get_kubernetesclient_core()
+        try:
+            c.patch_namespaced_secret(name, namespace, body)
+        except Exception as e:
+            LOG.error("Failed to patch Secret %s under Namespace %s: "
+                      "%s" % (name, namespace, e))
+            raise
+
     def kube_delete_persistent_volume_claim(self, namespace, **kwargs):
         c = self._get_kubernetesclient_core()
         try:
@@ -542,3 +574,52 @@ class KubeOperator(object):
             return None
         else:
             return match.group(1)
+
+    def kube_get_all_pods(self):
+        c = self._get_kubernetesclient_core()
+        try:
+            api_response = c.list_pod_for_all_namespaces(watch=False)
+            return api_response.items
+        except Exception as e:
+            LOG.error("Kubernetes exception in "
+                      "kube_get_pods: %s" % e)
+            raise
+
+    def kube_delete_pod(self, name, namespace, **kwargs):
+        body = {}
+
+        if kwargs:
+            body.update(kwargs)
+
+        c = self._get_kubernetesclient_core()
+        try:
+            api_response = c.delete_namespaced_pod(name, namespace, body)
+            LOG.debug("%s" % api_response)
+            return True
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                LOG.warn("Pod %s/%s not found." % (namespace, name))
+                return False
+            else:
+                LOG.error("Failed to delete Pod %s/%s: "
+                          "%s" % (namespace, name, e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in kube_delete_pod: %s" % e)
+            raise
+
+    def kube_get_pod(self, name, namespace):
+        c = self._get_kubernetesclient_core()
+        try:
+            api_response = c.read_namespaced_pod(name, namespace)
+            return api_response
+        except ApiException as e:
+            if e.status == httplib.NOT_FOUND:
+                return None
+            else:
+                LOG.error("Failed to get Pod %s/%s: %s" % (namespace, name,
+                                                           e.body))
+                raise
+        except Exception as e:
+            LOG.error("Kubernetes exception in "
+                      "kube_get_pod %s/%s: %s" % (namespace, name, e))
@@ -12,6 +12,7 @@ import pecan
 import wsme

 from oslo_log import log
+from six.moves.urllib.parse import urlparse
 from sysinv._i18n import _
 from sysinv.common import constants
 from sysinv.common import exception
@@ -145,6 +146,17 @@ def _validate_SAN_list(name, value):
                 % entry))


+def _validate_oidc_issuer_url(name, value):
+    """Check if oidc issuer address is valid"""
+
+    # is_valid_domain_or_ip does not work with entire urls
+    # for example, the 'https://' needs to be removed
+    parsed_value = urlparse(value)
+    if not parsed_value.netloc or not cutils.is_valid_domain_or_ip(parsed_value.netloc):
+        raise wsme.exc.ClientSideError(_(
+            "Parameter '%s' must be a valid address or domain." % name))
+
+
 def _get_network_pool_from_ip_address(ip, networks):
     for name in networks:
         try:
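The validator relies on urlparse populating netloc only when a scheme is present; a standalone illustration:

    from six.moves.urllib.parse import urlparse

    # With a scheme, the host lands in netloc and can be validated.
    assert urlparse('https://oidc.example.com/dex').netloc == 'oidc.example.com'
    # Without one, netloc is empty and the check above rejects the value.
    assert urlparse('oidc.example.com').netloc == ''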
@@ -517,6 +529,28 @@ KUBERNETES_CERTIFICATES_PARAMETER_DATA_FORMAT = {
     constants.SERVICE_PARAM_NAME_KUBERNETES_API_SAN_LIST: SERVICE_PARAMETER_DATA_FORMAT_ARRAY,
 }

+KUBERNETES_APISERVER_PARAMETER_OPTIONAL = [
+    constants.SERVICE_PARAM_NAME_OIDC_ISSUER_URL,
+    constants.SERVICE_PARAM_NAME_OIDC_CLIENT_ID,
+    constants.SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM,
+    constants.SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM,
+]
+
+KUBERNETES_APISERVER_PARAMETER_VALIDATOR = {
+    constants.SERVICE_PARAM_NAME_OIDC_ISSUER_URL: _validate_oidc_issuer_url,
+}
+
+KUBERNETES_APISERVER_PARAMETER_RESOURCE = {
+    constants.SERVICE_PARAM_NAME_OIDC_ISSUER_URL:
+        'platform::kubernetes::params::oidc_issuer_url',
+    constants.SERVICE_PARAM_NAME_OIDC_CLIENT_ID:
+        'platform::kubernetes::params::oidc_client_id',
+    constants.SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM:
+        'platform::kubernetes::params::oidc_username_claim',
+    constants.SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM:
+        'platform::kubernetes::params::oidc_groups_claim',
+}
+
 HTTPD_PORT_PARAMETER_OPTIONAL = [
     constants.SERVICE_PARAM_HTTP_PORT_HTTP,
     constants.SERVICE_PARAM_HTTP_PORT_HTTPS,
@@ -545,6 +579,25 @@ OPENSTACK_HELM_PARAMETER_RESOURCE = {
         'openstack::helm::params::endpoint_domain',
 }

+PTP_GLOBAL_PARAMETER_OPTIONAL = [
+    constants.SERVICE_PARAM_NAME_WILDCARD
+]
+
+PTP_GLOBAL_PARAMETER_VALIDATOR = {
+    constants.SERVICE_PARAM_NAME_WILDCARD: _validate_not_empty
+}
+
+PTP_PHC2SYS_PARAMETER_OPTIONAL = [
+    constants.SERVICE_PARAM_NAME_PTP_UPDATE_RATE,
+    constants.SERVICE_PARAM_NAME_PTP_SUMMARY_UPDATES
+]
+
+PTP_PHC2SYS_PARAMETER_VALIDATOR = {
+    constants.SERVICE_PARAM_NAME_PTP_UPDATE_RATE: _validate_float,
+    # phc2sys summary-updates accepts a range of 0 to UINT_MAX (ie 2^32 - 1)
+    constants.SERVICE_PARAM_NAME_PTP_SUMMARY_UPDATES: lambda name, value: _validate_range(name, value, 0, 2 ** 32 - 1)
+}
+
 # Service Parameter Schema
 SERVICE_PARAM_MANDATORY = 'mandatory'
 SERVICE_PARAM_OPTIONAL = 'optional'
@@ -628,6 +681,21 @@ SERVICE_PARAMETER_SCHEMA = {
             SERVICE_PARAM_RESOURCE: KUBERNETES_CERTIFICATES_PARAMETER_RESOURCE,
             SERVICE_PARAM_DATA_FORMAT: KUBERNETES_CERTIFICATES_PARAMETER_DATA_FORMAT,
         },
+        constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER: {
+            SERVICE_PARAM_OPTIONAL: KUBERNETES_APISERVER_PARAMETER_OPTIONAL,
+            SERVICE_PARAM_VALIDATOR: KUBERNETES_APISERVER_PARAMETER_VALIDATOR,
+            SERVICE_PARAM_RESOURCE: KUBERNETES_APISERVER_PARAMETER_RESOURCE,
+        },
     },
+    constants.SERVICE_TYPE_PTP: {
+        constants.SERVICE_PARAM_SECTION_PTP_GLOBAL: {
+            SERVICE_PARAM_OPTIONAL: PTP_GLOBAL_PARAMETER_OPTIONAL,
+            SERVICE_PARAM_VALIDATOR: PTP_GLOBAL_PARAMETER_VALIDATOR
+        },
+        constants.SERVICE_PARAM_SECTION_PTP_PHC2SYS: {
+            SERVICE_PARAM_OPTIONAL: PTP_PHC2SYS_PARAMETER_OPTIONAL,
+            SERVICE_PARAM_VALIDATOR: PTP_PHC2SYS_PARAMETER_VALIDATOR
+        },
+    },
     constants.SERVICE_TYPE_HTTP: {
         constants.SERVICE_PARAM_SECTION_HTTP_CONFIG: {
@@ -28,6 +28,8 @@ import boto3
 from botocore.config import Config
 import collections
 import contextlib
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
 import datetime
 import errno
 import functools
@@ -2088,6 +2090,25 @@ def is_chart_enabled(dbapi, app_name, chart_name, namespace):
                      False)


+def get_app_supported_kube_version(app_name, app_version):
+    """Get the application supported k8s version from the synced application metadata file"""
+
+    app_metadata_path = os.path.join(
+        constants.APP_SYNCED_ARMADA_DATA_PATH, app_name,
+        app_version, constants.APP_METADATA_FILE)
+
+    kube_min_version = None
+    kube_max_version = None
+    if (os.path.exists(app_metadata_path) and
+            os.path.getsize(app_metadata_path) > 0):
+        with open(app_metadata_path, 'r') as f:
+            y = yaml.safe_load(f)
+            supported_kube_version = y.get('supported_k8s_version', {})
+            kube_min_version = supported_kube_version.get('minimum', None)
+            kube_max_version = supported_kube_version.get('maximum', None)
+    return kube_min_version, kube_max_version
+
+
 def app_reapply_flag_file(app_name):
     return "%s.%s" % (
         constants.APP_PENDING_REAPPLY_FLAG,
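The function reads the per-app armada metadata file synced during upload; a sketch of the dict shape yaml.safe_load would hand back for an app that pins a Kubernetes range (keys per the code above, values illustrative):

    y = {
        'supported_k8s_version': {
            'minimum': 'v1.16.2',
            'maximum': 'v1.18.1',
        },
    }
    supported = y.get('supported_k8s_version', {})
    assert (supported.get('minimum'), supported.get('maximum')) == \
        ('v1.16.2', 'v1.18.1')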
@@ -2166,3 +2187,32 @@ def get_aws_ecr_registry_credentials(dbapi, registry, username, password):
             "Failed to get AWS ECR credentials: %s" % e))

     return dict(username=username, password=password)
+
+
+def extract_certs_from_pem(pem_contents):
+    """
+    Extract certificates from a pem string
+
+    :param pem_contents: A string in pem format
+    :return certs: A list of x509 cert objects
+    """
+    marker = b'-----BEGIN CERTIFICATE-----'
+
+    start = 0
+    certs = []
+    while True:
+        index = pem_contents.find(marker, start)
+        if index == -1:
+            break
+        try:
+            cert = x509.load_pem_x509_certificate(pem_contents[index:],
+                                                  default_backend())
+        except Exception:
+            LOG.exception(_("Load pem x509 certificate failed at file "
+                            "location: %s") % index)
+            raise exception.SysinvException(_(
+                "Failed to load pem x509 certificate"))
+
+        certs.append(cert)
+        # find() returns an absolute offset, so advance past this marker
+        start = index + len(marker)
+    return certs
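A hedged usage sketch of extract_certs_from_pem on a CA bundle (the path is hypothetical; the input must be bytes since the marker is a byte string):

    with open('/etc/ssl/certs/ca-bundle.pem', 'rb') as f:  # hypothetical path
        certs = extract_certs_from_pem(f.read())
    for cert in certs:
        # Each entry is a cryptography.x509.Certificate object.
        print(cert.subject, cert.not_valid_after)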
@@ -0,0 +1,88 @@
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Copyright (C) 2019 Intel Corporation
+#
+"""
+Sysinv Keystone notification listener.
+"""
+
+import keyring
+import oslo_messaging
+
+from oslo_config import cfg
+from oslo_log import log
+
+from sysinv.common import constants
+from sysinv.common import utils
+from sysinv.db import api as dbapi
+
+LOG = log.getLogger(__name__)
+
+kube_app = None
+
+
+class NotificationEndpoint(object):
+    """Task which exposes the API for consuming priority based notifications.
+
+    The Oslo notification framework delivers notifications based on priority to
+    matching callback APIs as defined in its notification listener endpoint
+    list.
+
+    Currently from Keystone perspective, `info` API is sufficient as Keystone
+    sends notifications at `info` priority ONLY. Other priority level APIs
+    (warn, error, critical, audit, debug) are not needed here.
+    """
+    filter_rule = oslo_messaging.NotificationFilter(
+        event_type='identity.user.updated')
+
+    def info(self, ctxt, publisher_id, event_type, payload, metadata):
+        """Receives notification at info level."""
+        global kube_app
+        kube_app.audit_local_registry_secrets()
+        return oslo_messaging.NotificationResult.HANDLED
+
+
+def get_transport_url():
+    try:
+        db_api = dbapi.get_instance()
+        address = db_api.address_get_by_name(
+            utils.format_address_name(constants.CONTROLLER_HOSTNAME,
+                                      constants.NETWORK_TYPE_MGMT)
+        )
+
+    except Exception as e:
+        LOG.error("Failed to get management IP address: %s" % str(e))
+        return None
+
+    auth_password = keyring.get_password('amqp', 'rabbit')
+
+    transport_url = "rabbit://guest:%s@%s:5672" % (auth_password, address.address)
+    return transport_url
+
+
+def start_keystone_listener(app):
+
+    global kube_app
+    kube_app = app
+
+    conf = cfg.ConfigOpts()
+    conf.transport_url = get_transport_url()
+
+    if conf.transport_url is None:
+        return
+
+    transport = oslo_messaging.get_rpc_transport(conf)
+    targets = [
+        oslo_messaging.Target(exchange='keystone', topic='notifications', fanout=True),
+    ]
+    endpoints = [
+        NotificationEndpoint(),
+    ]
+
+    pool = "sysinv-keystone-listener-workers"
+    server = oslo_messaging.get_notification_listener(transport, targets,
+                                                      endpoints, pool=pool)
+    LOG.info("Sysinv keystone listener started!")
+    server.start()
+    server.wait()
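Because of the NotificationFilter, only identity.user.updated events ever reach info(); a small sketch of the filter behaviour, assuming oslo.messaging's NotificationFilter.match(context, publisher_id, event_type, metadata, payload) helper:

    endpoint = NotificationEndpoint()
    assert endpoint.filter_rule.match(None, 'identity.1',
                                      'identity.user.updated', None, None)
    assert not endpoint.filter_rule.match(None, 'identity.1',
                                          'identity.user.created', None, None)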
@@ -41,6 +41,7 @@ from sysinv.common import image_versions
 from sysinv.common.retrying import retry
 from sysinv.common import utils as cutils
 from sysinv.common.storage_backend_conf import K8RbdProvisioner
+from sysinv.conductor import kube_pod_helper as kube_pod
 from sysinv.conductor import openstack
 from sysinv.helm import common
 from sysinv.helm import helm
@@ -142,6 +143,7 @@ class AppOperator(object):
         self._kube = kubernetes.KubeOperator()
         self._utils = kube_app.KubeAppHelper(self._dbapi)
         self._image = AppImageParser()
+        self._kube_pod = kube_pod.K8sPodOperator(self._kube)
         self._lock = threading.Lock()

         if not os.path.isfile(constants.ANSIBLE_BOOTSTRAP_FLAG):
@@ -946,7 +948,7 @@ class AppOperator(object):
         for ns in namespaces:
             if (ns in [common.HELM_NS_HELM_TOOLKIT,
                        common.HELM_NS_STORAGE_PROVISIONER] or
-                    self._kube.kube_get_secret(pool_secret, ns)):
+                    self._kube.kube_get_secret(pool_secret, ns) is not None):
                 # Secret already exist
                 continue

@@ -1012,7 +1014,7 @@ class AppOperator(object):
             list(set([ns for ns_list in app_ns.values() for ns in ns_list]))
         for ns in namespaces:
             if (ns == common.HELM_NS_HELM_TOOLKIT or
-                    self._kube.kube_get_secret(DOCKER_REGISTRY_SECRET, ns)):
+                    self._kube.kube_get_secret(DOCKER_REGISTRY_SECRET, ns) is not None):
                 # Secret already exist
                 continue

@@ -1063,6 +1065,81 @@ class AppOperator(object):
             LOG.error(e)
             raise

+    def audit_local_registry_secrets(self):
+        """
+        local registry uses admin's username&password for authentication.
+        K8s stores the authentication info in secrets in order to access
+        local registry, while admin's password is saved in keyring.
+        Admin's password could be changed by openstack client cmd outside of
+        sysinv and K8s. It will cause info mismatch between keyring and
+        k8s's secrets, and leads to authentication failure.
+        There are two ways to keep k8s's secrets updated with data in keyring:
+        1. Polling. Use a periodic task to sync info from keyring to secrets.
+        2. Notification. Keystone sends out a notification when there is a
+           password update, and the notification receiver does the data sync.
+        To ensure k8s's secrets are timely and always synced with keyring, both
+        methods are used here. And this function will be called in both cases
+        to audit password info between keyring and registry-local-secret, and
+        update keyring's password to all local registry secrets if needed.
+        """
+
+        # Use lock to synchronize call from timer and notification
+        lock_name = "AUDIT_LOCAL_REGISTRY_SECRETS"
+
+        @cutils.synchronized(lock_name, external=False)
+        def _sync_audit_local_registry_secrets(self):
+            try:
+                secret = self._kube.kube_get_secret("registry-local-secret", kubernetes.NAMESPACE_KUBE_SYSTEM)
+                if secret is None:
+                    return
+                secret_auth_body = base64.b64decode(secret.data['.dockerconfigjson'])
+                secret_auth_info = (secret_auth_body.split('auth":')[1]).split('"')[1]
+                registry_auth = cutils.get_local_docker_registry_auth()
+                registry_auth_info = '{0}:{1}'.format(registry_auth['username'],
+                                                      registry_auth['password'])
+                if secret_auth_info == base64.b64encode(registry_auth_info):
+                    LOG.debug("Auth info is the same, no update is needed for k8s secret.")
+                    return
+            except Exception as e:
+                LOG.error(e)
+                return
+            try:
+                # update secret with new auth info
+                token = '{{\"auths\": {{\"{0}\": {{\"auth\": \"{1}\"}}}}}}'.format(
+                    constants.DOCKER_REGISTRY_SERVER, base64.b64encode(registry_auth_info))
+                secret.data['.dockerconfigjson'] = base64.b64encode(token)
+                self._kube.kube_patch_secret("registry-local-secret", kubernetes.NAMESPACE_KUBE_SYSTEM, secret)
+                LOG.info("Secret registry-local-secret under Namespace kube-system is updated")
+            except Exception as e:
+                LOG.error("Failed to update Secret %s under Namespace kube-system: %s"
+                          % ("registry-local-secret", e))
+                return
+
+            # update "default-registry-key" secret info under all namespaces
+            try:
+                ns_list = self._kube.kube_get_namespace_name_list()
+                for ns in ns_list:
+                    secret = self._kube.kube_get_secret(DOCKER_REGISTRY_SECRET, ns)
+                    if secret is None:
+                        continue
+
+                    try:
+                        secret_auth_body = base64.b64decode(secret.data['.dockerconfigjson'])
+                        if constants.DOCKER_REGISTRY_SERVER in secret_auth_body:
+                            secret.data['.dockerconfigjson'] = base64.b64encode(token)
+                            self._kube.kube_patch_secret(DOCKER_REGISTRY_SECRET, ns, secret)
+                            LOG.info("Secret %s under Namespace %s is updated"
+                                     % (DOCKER_REGISTRY_SECRET, ns))
+                    except Exception as e:
+                        LOG.error("Failed to update Secret %s under Namespace %s: %s"
+                                  % (DOCKER_REGISTRY_SECRET, ns, e))
+                        continue
+            except Exception as e:
+                LOG.error(e)
+                return
+
+        _sync_audit_local_registry_secrets(self)
+
     def _delete_namespace(self, namespace):
         loop_timeout = 1
         timeout = 300
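The token written above is the standard kubernetes.io/dockerconfigjson payload, base64-encoded again inside the secret; a standalone Python 3 sketch of the round trip (registry and credentials illustrative):

    import base64
    import json

    auth = base64.b64encode(b'admin:secret').decode()
    token = json.dumps({'auths': {'registry.local:9001': {'auth': auth}}})
    encoded = base64.b64encode(token.encode())  # value stored in secret.data
    assert 'auths' in json.loads(base64.b64decode(encoded))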
@@ -1546,7 +1623,7 @@ class AppOperator(object):
             LOG.error(e)
             raise

-    def _delete_app_specific_resources(self, app_name):
+    def _delete_app_specific_resources(self, app_name, operation_type):
         """Remove application specific k8s resources.

         Some applications may need resources created outside of the existing
@@ -1570,9 +1647,11 @@ class AppOperator(object):
             raise
         self._delete_namespace(namespace)

-        if app_name == constants.HELM_APP_OPENSTACK:
+        if (app_name == constants.HELM_APP_OPENSTACK and
+                operation_type == constants.APP_REMOVE_OP):
             _delete_ceph_persistent_volume_claim(common.HELM_NS_OPENSTACK)
-        elif app_name == constants.HELM_APP_MONITOR:
+        elif (app_name == constants.HELM_APP_MONITOR and
+                operation_type == constants.APP_DELETE_OP):
             _delete_ceph_persistent_volume_claim(common.HELM_NS_MONITOR)

     def _perform_app_recover(self, old_app, new_app, armada_process_required=True):
@@ -1800,7 +1879,15 @@ class AppOperator(object):

         with self._lock:
             self._extract_tarfile(app)
+
+        # Copy the armada manifest and metadata file to the drbd
         shutil.copy(app.inst_armada_mfile, app.sync_armada_mfile)
+        inst_metadata_file = os.path.join(
+            app.inst_path, constants.APP_METADATA_FILE)
+        if os.path.exists(inst_metadata_file):
+            sync_metadata_file = os.path.join(
+                app.sync_armada_mfile_dir, constants.APP_METADATA_FILE)
+            shutil.copy(inst_metadata_file, sync_metadata_file)

         if not self._docker.make_armada_request(
                 'validate', manifest_file=app.armada_service_mfile):
@@ -1950,6 +2037,16 @@ class AppOperator(object):
                                   True)

         self.clear_reapply(app.name)
+        # WORKAROUND: For k8s MatchNodeSelector issue. Look for and clean up any
+        # pods that could block manifest apply
+        #
+        # Upstream reports of this:
+        # - https://github.com/kubernetes/kubernetes/issues/80745
+        # - https://github.com/kubernetes/kubernetes/issues/85334
+        #
+        # Outstanding PR that was tested and fixed this issue:
+        # - https://github.com/kubernetes/kubernetes/pull/80976
+        self._kube_pod.delete_failed_pods_by_reason(reason='MatchNodeSelector')

         LOG.info("Application %s (%s) apply started." % (app.name, app.version))

@@ -1999,7 +2096,8 @@ class AppOperator(object):
                     self._abort_operation(app, constants.APP_APPLY_OP,
                                           user_initiated=True)
                 else:
-                    self._abort_operation(app, constants.APP_APPLY_OP, str(e))
+                    self._abort_operation(app, constants.APP_APPLY_OP,
+                                          constants.APP_PROGRESS_ABORTED)

         if not caller:
             # If apply is not called from update method, deregister the app's
@@ -2124,6 +2222,8 @@ class AppOperator(object):
         try:
             # Upload new app tarball
             to_app = self.perform_app_upload(to_rpc_app, tarfile)
+            # Check whether the new application is compatible with the current k8s version
+            self._utils._check_app_compatibility(to_app.name, to_app.version)

             self._update_app_status(to_app, constants.APP_UPDATE_IN_PROGRESS)

@@ -2173,13 +2273,15 @@ class AppOperator(object):
                              to_app.version))
             LOG.info("Application %s update from version %s to version "
                      "%s completed." % (to_app.name, from_app.version, to_app.version))
-        except (exception.KubeAppUploadFailure,
+        except (exception.IncompatibleKubeVersion,
+                exception.KubeAppUploadFailure,
                 exception.KubeAppApplyFailure,
-                exception.KubeAppAbort):
+                exception.KubeAppAbort) as e:
             # Error occurs during app uploading or applying but before
             # armada apply process...
             # ie.images download/k8s resource creation failure
             # Start recovering without trigger armada process
+            LOG.exception(e)
             return self._perform_app_recover(from_app, to_app,
                                              armada_process_required=False)
         except Exception as e:
@@ -2240,7 +2342,7 @@ class AppOperator(object):
             if app.system_app:
                 if self._storage_provisioner_required(app.name):
                     self._delete_storage_provisioner_secrets(app.name)
-                self._delete_app_specific_resources(app.name)
+                self._delete_app_specific_resources(app.name, constants.APP_REMOVE_OP)
         except Exception as e:
             self._abort_operation(app, constants.APP_REMOVE_OP)
             LOG.exception(e)
@@ -2328,6 +2430,8 @@ class AppOperator(object):

         app = AppOperator.Application(rpc_app)
         try:
+            if app.system_app:
+                self._delete_app_specific_resources(app.name, constants.APP_DELETE_OP)
             self._dbapi.kube_app_destroy(app.name)
             self._cleanup(app)
             self._utils._patch_report_app_dependencies(app.name + '-' + app.version)
@@ -2603,22 +2707,12 @@ class DockerHelper(object):
             elif pub_img_tag.startswith(registry_info['registry_replaced']):
                 return pub_img_tag, registry_auth

-        # If the image is not from any of the known registries
-        # or no registry name specified in image tag, use user specified
-        # docker registry as default
-        registry = self.registries_info[
-            constants.SERVICE_PARAM_SECTION_DOCKER_DOCKER_REGISTRY]['registry_replaced']
-        registry_auth = self.registries_info[
-            constants.SERVICE_PARAM_SECTION_DOCKER_DOCKER_REGISTRY]['registry_auth']
-        registry_name = pub_img_tag[:pub_img_tag.find('/')]
-
-        if registry:
-            LOG.info("Registry %s not recognized or docker.io repository "
-                     "detected. Pulling from public/private registry"
-                     % registry_name)
-            return registry + '/' + pub_img_tag, registry_auth
-        return pub_img_tag, registry_auth
+        # In case the image is overridden via "system helm-override-update"
+        # with a custom registry that is not from any of the known registries
+        # (ie. k8s.gcr.io, gcr.io, quay.io, docker.io, docker.elastic.co)
+        # , pull directly from the custom registry (Note: The custom registry
+        # must be unauthenticated in this case.)
+        return pub_img_tag, None

     def _start_armada_service(self, client):
         try:
@@ -0,0 +1,112 @@
+#
+# Copyright (c) 2020 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# All Rights Reserved.
+#
+
+""" System Inventory Kubernetes Pod Operator."""
+
+import datetime
+import time
+
+from dateutil import tz
+from oslo_log import log as logging
+from sysinv.common import exception
+from sysinv.common import kubernetes
+
+LOG = logging.getLogger(__name__)
+
+
+class K8sPodOperator(object):
+
+    def __init__(self, kube_op=None):
+        self.kube_op = kube_op
+        if not self.kube_op:
+            self.kube_op = kubernetes.KubeOperator(None)
+
+    def _get_all_pods(self):
+        try:
+            pods = self.kube_op.kube_get_all_pods()
+        except Exception:
+            pods = []
+        return pods
+
+    def _delete_pod(self, name, namespace, expect_removal=True):
+        loop_timeout = 1
+        timeout = 30
+        try:
+            LOG.info("Deleting Pod %s/%s ..." % (namespace, name))
+            delete_requested = datetime.datetime.now(tz.tzlocal())
+            if not self.kube_op.kube_delete_pod(name, namespace,
+                                                grace_period_seconds=0):
+                LOG.warning("Pod %s/%s deletion unsuccessful..." % (namespace,
+                                                                    name))
+                return
+
+            # Pod termination timeout: 30 seconds
+            while (loop_timeout <= timeout):
+                pod = self.kube_op.kube_get_pod(name, namespace)
+                if not pod and not expect_removal:
+                    # Pod has been unexpectedly terminated
+                    raise exception.KubePodDeleteUnexpected(namespace=namespace,
+                                                            name=name)
+                elif not pod and expect_removal:
+                    # Pod has been terminated
+                    LOG.info("Pod %s/%s successfully terminated" % (namespace,
+                                                                    name))
+                    break
+                elif pod and not expect_removal:
+                    if pod.status.phase == 'Pending':
+                        # Pod is restarting.
+                        LOG.info("Pod %s/%s restart pending" % (namespace, name))
+                        break
+                    elif pod.status.phase == 'Running':
+                        if pod.metadata.creation_timestamp > delete_requested:
+                            # Pod restarted quickly
+                            LOG.info("Pod %s/%s recreated %ss ago" % (
+                                namespace, name,
+                                (delete_requested -
+                                 pod.metadata.creation_timestamp).total_seconds()))
+                            break
+                        LOG.info("Pod %s/%s running" % (namespace, name))
+                elif pod and expect_removal:
+                    # Still around or missed the Pending state transition
+                    LOG.info("Pod %s/%s (%s) waiting on removal." % (
+                        namespace, name, pod.status.phase))
+                loop_timeout += 1
+                time.sleep(1)
+
+            if loop_timeout > timeout:
+                raise exception.KubePodDeleteTimeout(namespace=namespace, name=name)
+            LOG.info("Pod %s/%s delete completed." % (namespace, name))
+        except Exception as e:
+            LOG.error(e)
+            raise
+
+    def get_failed_pods_by_reason(self, reason=None):
+        failed_pods = []
+        all_pods = self._get_all_pods()
+        for pod in all_pods:
+            if pod.status.phase == 'Failed':
+                if reason:
+                    if pod.status.reason == reason:
+                        failed_pods.append(pod)
+                else:
+                    failed_pods.append(pod)
+        return failed_pods
+
+    def delete_failed_pods_by_reason(self, pods=None, reason=None):
+        failed_pods = pods
+        if not pods:
+            failed_pods = self.get_failed_pods_by_reason(reason=reason)
+
+        for pod in failed_pods:
+            LOG.info("DELETING POD: %s/%s: found as %s/%s" % (
+                pod.metadata.namespace, pod.metadata.name,
+                pod.status.phase, pod.status.reason))
+            try:
+                self._delete_pod(pod.metadata.name, pod.metadata.namespace)
+            except Exception:
+                pass
@@ -45,12 +45,12 @@ import uuid
 import xml.etree.ElementTree as ElementTree
 from contextlib import contextmanager
 from datetime import datetime
+from datetime import timedelta

 import tsconfig.tsconfig as tsc
 from collections import namedtuple
 from cgcs_patch.patch_verify import verify_files
-from controllerconfig.upgrades import management as upgrades_management
 from cryptography import x509
 from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives import serialization
 from cryptography.hazmat.primitives.asymmetric import rsa
@@ -94,8 +94,10 @@ from sysinv.common.storage_backend_conf import StorageBackendConfig
 from cephclient import wrapper as ceph
 from sysinv.conductor import ceph as iceph
 from sysinv.conductor import kube_app
+from sysinv.conductor import kube_pod_helper as kube_pod
 from sysinv.conductor import openstack
 from sysinv.conductor import docker_registry
+from sysinv.conductor import keystone_listener
 from sysinv.db import api as dbapi
 from sysinv import objects
 from sysinv.objects import base as objects_base
@@ -182,6 +184,7 @@ class ConductorManager(service.PeriodicService):
         self._ceph_api = ceph.CephWrapper(
             endpoint='http://localhost:5001')
         self._kube = None
+        self._kube_pod = None
         self._fernet = None

         self._openstack = None
@@ -204,7 +207,17 @@ class ConductorManager(service.PeriodicService):

         # Upgrade/Downgrade kubernetes components.
         # greenthread must be called after super.start for it to work properly.
-        greenthread.spawn(self._upgrade_downgrade_kube_components())
+        greenthread.spawn(self._upgrade_downgrade_kube_components)
+
+        # monitor keystone user update event to check whether admin password is
+        # changed or not. If changed, then sync it to kubernetes's secret info.
+        greenthread.spawn(keystone_listener.start_keystone_listener, self._app)
+
+        # Monitor ceph to become responsive
+        if StorageBackendConfig.has_backend_configured(
+                self.dbapi,
+                constants.SB_TYPE_CEPH):
+            greenthread.spawn(self._init_ceph_cluster_info)

     def _start(self):
         self.dbapi = dbapi.get_instance()
@@ -229,9 +242,9 @@ class ConductorManager(service.PeriodicService):
         # ceph for the initial unlock.
         self._app = kube_app.AppOperator(self.dbapi)
         self._docker = kube_app.DockerHelper(self.dbapi)
-        self._ceph = iceph.CephOperator(self.dbapi)
         self._helm = helm.HelmOperator(self.dbapi)
         self._kube = kubernetes.KubeOperator()
+        self._kube_pod = kube_pod.K8sPodOperator(self._kube)
         self._kube_app_helper = kube_api.KubeAppHelper(self.dbapi)
         self._fernet = fernet.FernetOperator()

@@ -244,6 +257,9 @@ class ConductorManager(service.PeriodicService):
         LOG.info("sysinv-conductor start committed system=%s" %
                  system.as_dict())

+        # Save our start time for time limited init actions
+        self._start_time = timeutils.utcnow()
+
     def periodic_tasks(self, context, raise_on_error=False):
         """ Periodic tasks are run at pre-specified intervals. """
         return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
@@ -1228,11 +1244,11 @@ class ConductorManager(service.PeriodicService):
             return

         address = self.dbapi.address_get_by_name(address_name)
-        interface_uuid = address.interface_uuid
+        interface_id = address.interface_id
         ip_address = address.address

-        if interface_uuid:
-            interface = self.dbapi.iinterface_get(interface_uuid)
+        if interface_id:
+            interface = self.dbapi.iinterface_get(interface_id)
             mac_address = interface.imac
         elif network_type == constants.NETWORK_TYPE_MGMT:
             ihost = self.dbapi.ihost_get_by_hostname(hostname)
@@ -1362,7 +1378,7 @@ class ConductorManager(service.PeriodicService):
             return
         if not self.dbapi.ceph_mon_get_by_ihost(host.uuid):
             system = self.dbapi.isystem_get_one()
-            ceph_mon_gib = None
+            ceph_mon_gib = constants.SB_CEPH_MON_GIB
             ceph_mons = self.dbapi.ceph_mon_get_list()
             if ceph_mons:
                 ceph_mon_gib = ceph_mons[0].ceph_mon_gib
@@ -1387,7 +1403,14 @@ class ConductorManager(service.PeriodicService):
                 LOG.info("Deleting ceph monitor for host %s"
                          % str(host.hostname))
                 self.dbapi.ceph_mon_destroy(mon[0].uuid)
-                self._ceph.remove_ceph_monitor(host.hostname)
+                # At this point self._ceph should always be set, but we check
+                # just to be sure
+                if self._ceph is not None:
+                    self._ceph.remove_ceph_monitor(host.hostname)
+                else:
+                    # This should never happen, but if it does, log it so
+                    # there is a trace of it
+                    LOG.error("Error deleting ceph monitor")
             else:
                 LOG.info("No ceph monitor present for host %s. "
                          "Skipping deleting ceph monitor."
@@ -1540,8 +1563,21 @@ class ConductorManager(service.PeriodicService):
         :param host: host object
         """

-        # Update cluster and peers model
-        self._ceph.update_ceph_cluster(host)
+        # Update cluster and peers model.
+        # We call this function when setting the personality of a storage host.
+        # In cases where we configure the storage-backend before unlocking
+        # controller-0, and then configuring all other hosts, ceph will not be
+        # responsive (and self._ceph not be set) when setting the storage
+        # personality.
+        # But that's ok, because this function is also called when unlocking a
+        # storage node and we are guaranteed (by consistency checks) a
+        # responsive ceph cluster at that point in time and we can update the
+        # ceph cluster information successfully.
+        if self._ceph is not None:
+            self._ceph.update_ceph_cluster(host)
+        else:
+            # It's ok, we just log a message for debug purposes
+            LOG.debug("Error updating cluster information")

         # Only update the manifest if the host is running the same version as
         # the active controller.
@@ -1760,7 +1796,6 @@ class ConductorManager(service.PeriodicService):
         :param inic_dict_array: initial values for iport objects
         :returns: pass or fail
         """
-
        LOG.debug("Entering iport_update_by_ihost %s %s" %
                  (ihost_uuid, inic_dict_array))
        ihost_uuid.strip()
@@ -2042,7 +2077,7 @@ class ConductorManager(service.PeriodicService):
                 addr_name = cutils.format_address_name(ihost.hostname,
                                                        networktype)
                 address = self.dbapi.address_get_by_name(addr_name)
-                if address['interface_uuid'] is None:
+                if address['interface_id'] is None:
                     self.dbapi.address_update(address['uuid'], values)
             except exception.AddressNotFoundByName:
                 pass
@@ -4191,6 +4226,38 @@ class ConductorManager(service.PeriodicService):

         return

+    @retry(retry_on_result=lambda x: x is False,
+           wait_fixed=(constants.INIT_CEPH_INFO_INTERVAL_SECS * 1000))
+    def _init_ceph_cluster_info(self):
+        if not self._ceph:
+            try:
+                _, fsid = self._ceph_api.fsid(body='text', timeout=10)
+            except Exception as e:
+                LOG.debug("Ceph REST API not responsive. Error = %s" % str(e))
+                return False
+            LOG.info("Ceph cluster has become responsive")
+            self._ceph = iceph.CephOperator(self.dbapi)
+
+        try:
+            # We manually check for the crushmap_applied flag because we don't
+            # want to re-fix the crushmap if it's already been fixed and the
+            # fix_crushmap function returns False if it finds the flag.
+            crushmap_flag_file = os.path.join(
+                constants.SYSINV_CONFIG_PATH,
+                constants.CEPH_CRUSH_MAP_APPLIED)
+            if not os.path.isfile(crushmap_flag_file):
+                return cceph.fix_crushmap(self.dbapi)
+            return True
+
+        except Exception as e:
+            # fix_crushmap will throw an exception if the storage_model
+            # is unclear. This happens on a standard (2+2) setup, before
+            # adding storage-0 or adding the 3rd monitor to a compute node.
+            # In such cases we just wait until the mode has become clear,
+            # so we just return False and retry.
+            LOG.debug("Error fixing crushmap. Exception %s" % str(e))
+            return False
+
     def _fix_storage_install_uuid(self):
         """
         Fixes install_uuid for storage nodes during a restore procedure
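The @retry decorator re-invokes the method while it returns False, sleeping wait_fixed milliseconds between attempts; a hedged standalone sketch of the same pattern with the retrying package:

    from retrying import retry

    attempts = []

    @retry(retry_on_result=lambda x: x is False, wait_fixed=100)
    def wait_until_ready():
        attempts.append(1)
        # False triggers another attempt after 100 ms; any other
        # return value ends the loop.
        return len(attempts) >= 3

    wait_until_ready()
    assert len(attempts) == 3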
@@ -4401,10 +4468,12 @@ class ConductorManager(service.PeriodicService):
                             {'capabilities': ihost.capabilities})

         if availability == constants.AVAILABILITY_AVAILABLE:
-            if imsg_dict.get(constants.SYSINV_AGENT_FIRST_REPORT):
+            if (imsg_dict.get(constants.SYSINV_AGENT_FIRST_REPORT) and
+                    StorageBackendConfig.has_backend_configured(
+                        self.dbapi,
+                        constants.SB_TYPE_CEPH)):
                 # This should be run once after a node boot
                 self._clear_ceph_stor_state(ihost_uuid)
-                cceph.fix_crushmap(self.dbapi)
             config_uuid = imsg_dict['config_applied']
             self._update_host_config_applied(context, ihost, config_uuid)

@@ -4830,6 +4899,13 @@ class ConductorManager(service.PeriodicService):
                                        'install_state_info':
                                        host.install_state_info})

+    @periodic_task.periodic_task(spacing=CONF.conductor.audit_interval)
+    def _kubernetes_local_secrets_audit(self, context):
+        # Audit kubernetes local registry secrets info
+        LOG.debug("Sysinv Conductor running periodic audit task for k8s local registry secrets.")
+        if self._app:
+            self._app.audit_local_registry_secrets()
+
     @periodic_task.periodic_task(spacing=CONF.conductor.audit_interval)
     def _conductor_audit(self, context):
         # periodically, perform audit of inventory
@@ -5117,6 +5193,27 @@ class ConductorManager(service.PeriodicService):
                 (active_ctrl.operational != constants.OPERATIONAL_ENABLED))):
             return

+        # WORKAROUND: For k8s MatchNodeSelector issue. Call this for a limited
+        # time (5 times over ~5 minutes) on a AIO-SX controller
+        # configuration after conductor startup.
+        #
+        # Upstream reports of this:
+        # - https://github.com/kubernetes/kubernetes/issues/80745
+        # - https://github.com/kubernetes/kubernetes/issues/85334
+        #
+        # Outstanding PR that was tested and fixed this issue:
+        # - https://github.com/kubernetes/kubernetes/pull/80976
+        system_mode = self.dbapi.isystem_get_one().system_mode
+        if system_mode == constants.SYSTEM_MODE_SIMPLEX:
+            if (self._start_time + timedelta(minutes=5) >
+                    datetime.now(self._start_time.tzinfo)):
+                LOG.info("Periodic Task: _k8s_application_audit: Checking for "
+                         "MatchNodeSelector issue for %s" % str(
+                             (self._start_time + timedelta(minutes=5)) -
+                             datetime.now(self._start_time.tzinfo)))
+                self._kube_pod.delete_failed_pods_by_reason(
+                    reason='MatchNodeSelector')
+
         # Check the application state and take the appropriate action
         for app_name in constants.HELM_APPS_PLATFORM_MANAGED:

||||
|
@ -5384,6 +5481,9 @@ class ConductorManager(service.PeriodicService):
|
|||
constants.CINDER_BACKEND_CEPH):
|
||||
return 0
|
||||
|
||||
if self._ceph is None:
|
||||
return 0
|
||||
|
||||
if not self._ceph.get_ceph_cluster_info_availability():
|
||||
return 0
|
||||
|
||||
|
@@ -5397,6 +5497,9 @@ class ConductorManager(service.PeriodicService):
                 constants.CINDER_BACKEND_CEPH):
             return 0

+        if self._ceph is None:
+            return 0
+
         if not self._ceph.get_ceph_cluster_info_availability():
             return 0

@@ -5411,6 +5514,9 @@ class ConductorManager(service.PeriodicService):
                 constants.CINDER_BACKEND_CEPH):
             return

+        if self._ceph is None:
+            return
+
         if not self._ceph.get_ceph_cluster_info_availability():
             return

@@ -5423,6 +5529,9 @@ class ConductorManager(service.PeriodicService):
                 constants.CINDER_BACKEND_CEPH):
             return

+        if self._ceph is None:
+            return
+
         if not self._ceph.get_ceph_cluster_info_availability():
             return

@@ -5598,12 +5707,38 @@ class ConductorManager(service.PeriodicService):
                          constants.STORAGE]
         self._config_update_hosts(context, personalities, reboot=True)

-    def update_ptp_config(self, context):
+    def update_ptp_config(self, context, do_apply=False):
         """Update the PTP configuration"""
+        self._update_ptp_host_configs(context, do_apply)
+
+    def _update_ptp_host_configs(self, context, do_apply=False):
+        """Issue config updates to hosts with ptp clocks"""
         personalities = [constants.CONTROLLER,
                          constants.WORKER,
                          constants.STORAGE]
-        self._config_update_hosts(context, personalities)
+
+        hosts = self.dbapi.ihost_get_list()
+        ptp_hosts = [host.uuid for host in hosts if host.clock_synchronization == constants.PTP]
+
+        if ptp_hosts:
+            config_uuid = self._config_update_hosts(context, personalities, host_uuids=ptp_hosts)
+            if do_apply:
+                runtime_hosts = []
+                for host in hosts:
+                    if (host.clock_synchronization == constants.PTP and
+                            host.administrative == constants.ADMIN_UNLOCKED and
+                            host.operational == constants.OPERATIONAL_ENABLED and
+                            not (self._config_out_of_date(host) and
+                                 self._config_is_reboot_required(host.config_target))):
+                        runtime_hosts.append(host.uuid)
+
+                if runtime_hosts:
+                    config_dict = {
+                        "personalities": personalities,
+                        "classes": ['platform::ptp::runtime'],
+                        "host_uuids": runtime_hosts
+                    }
+                    self._config_apply_runtime_manifest(context, config_uuid, config_dict)

     def update_system_mode_config(self, context):
         """Update the system mode configuration"""
@@ -5689,7 +5824,7 @@ class ConductorManager(service.PeriodicService):
         config_dict = {
             "personalities": personalities,
             'host_uuids': [host_uuid],
-            "classes": 'platform::network::runtime',
+            "classes": 'platform::interfaces::sriov::runtime',
             puppet_common.REPORT_INVENTORY_UPDATE:
                 puppet_common.REPORT_PCI_SRIOV_CONFIG,
         }
@@ -6027,7 +6162,8 @@ class ConductorManager(service.PeriodicService):
         ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
         valid_ctrls = [ctrl for ctrl in ctrls if
                        ctrl.administrative == constants.ADMIN_UNLOCKED and
-                       ctrl.availability == constants.AVAILABILITY_AVAILABLE]
+                       ctrl.availability in [constants.AVAILABILITY_AVAILABLE,
+                                             constants.AVAILABILITY_DEGRADED]]
         classes = ['platform::partitions::runtime',
                    'platform::lvm::controller::runtime',
                    'platform::haproxy::runtime',
@@ -6035,15 +6171,14 @@ class ConductorManager(service.PeriodicService):
                    'platform::ceph::runtime_base',
                    ]

+        for ctrl in valid_ctrls:
+            self._ceph_mon_create(ctrl)
+
         if cutils.is_aio_duplex_system(self.dbapi):
             # On 2 node systems we have a floating Ceph monitor.
             classes.append('platform::drbd::cephmon::runtime')
             classes.append('platform::drbd::runtime')

-        # TODO (tliu) determine if this SB_SVC_CINDER section can be removed
         if constants.SB_SVC_CINDER in services:
             LOG.info("No cinder manifests for update_ceph_config")
-            classes.append('platform::sm::norestart::runtime')
+        classes.append('platform::sm::ceph::runtime')
         host_ids = [ctrl.uuid for ctrl in valid_ctrls]
         config_dict = {"personalities": personalities,
                        "host_uuids": host_ids,
@@ -6051,33 +6186,13 @@ class ConductorManager(service.PeriodicService):
                        puppet_common.REPORT_STATUS_CFG: puppet_common.REPORT_CEPH_BACKEND_CONFIG,
                        }

-        # TODO(oponcea) once sm supports in-service config reload always
-        # set reboot=False
-        active_controller = utils.HostHelper.get_active_controller(self.dbapi)
-        if utils.is_host_simplex_controller(active_controller):
-            reboot = False
-        else:
-            reboot = True
-
         # Set config out-of-date for controllers
         config_uuid = self._config_update_hosts(context,
                                                 personalities,
-                                                host_uuids=host_ids,
-                                                reboot=reboot)
+                                                host_uuids=host_ids)

-        # TODO(oponcea): Set config_uuid to a random value to keep Config out-of-date.
-        # Once sm supports in-service config reload, always set config_uuid=config_uuid
-        # in _config_apply_runtime_manifest and remove code bellow.
-        active_controller = utils.HostHelper.get_active_controller(self.dbapi)
-        if utils.is_host_simplex_controller(active_controller):
-            new_uuid = config_uuid
-        else:
-            new_uuid = str(uuid.uuid4())
+        # Apply runtime config but keep reboot required flag set in
+        # _config_update_hosts() above. Node needs a reboot to clear it.
+        config_uuid = self._config_clear_reboot_required(config_uuid)
         self._config_apply_runtime_manifest(context,
-                                            config_uuid=new_uuid,
+                                            config_uuid=config_uuid,
                                             config_dict=config_dict)

         tasks = {}
@@ -6822,7 +6937,7 @@ class ConductorManager(service.PeriodicService):
        state = constants.SB_STATE_CONFIGURED
        if cutils.is_aio_system(self.dbapi):
            task = None
            cceph.fix_crushmap(self.dbapi)
            greenthread.spawn(self._init_ceph_cluster_info)
        else:
            task = constants.SB_TASK_PROVISION_STORAGE
        values = {'state': state,
@@ -6843,7 +6958,7 @@ class ConductorManager(service.PeriodicService):
            if host.uuid == host_uuid:
                break
        else:
            LOG.error("Host %(host) is not in the required state!" % host_uuid)
            LOG.error("Host %s is not in the required state!" % host_uuid)
        host = self.dbapi.ihost_get(host_uuid)
        if not host:
            LOG.error("Host %s is invalid!" % host_uuid)
@@ -6863,6 +6978,7 @@ class ConductorManager(service.PeriodicService):
        if ceph_conf.state != constants.SB_STATE_CONFIG_ERR:
            if config_success:
                values = {'task': constants.SB_TASK_PROVISION_STORAGE}
                greenthread.spawn(self._init_ceph_cluster_info)
            else:
                values = {'task': str(tasks)}
        self.dbapi.storage_backend_update(ceph_conf.uuid, values)
@@ -6925,7 +7041,7 @@ class ConductorManager(service.PeriodicService):
            if host.uuid == host_uuid:
                break
        else:
            LOG.error("Host %(host) is not in the required state!" % host_uuid)
            LOG.error("Host %s is not in the required state!" % host_uuid)
        host = self.dbapi.ihost_get(host_uuid)
        if not host:
            LOG.error("Host %s is invalid!" % host_uuid)
@@ -7001,7 +7117,7 @@ class ConductorManager(service.PeriodicService):
            if host.uuid == host_uuid:
                break
        else:
            LOG.error("Host %(host) is not in the required state!" % host_uuid)
            LOG.error("Host %s is not in the required state!" % host_uuid)
        host = self.dbapi.ihost_get(host_uuid)
        if not host:
            LOG.error("Host %s is invalid!" % host_uuid)
@@ -7308,6 +7424,8 @@ class ConductorManager(service.PeriodicService):
        elif service == constants.SERVICE_TYPE_OPENSTACK:
            # Do nothing. Does not need to update target config of any hosts
            pass
        elif service == constants.SERVICE_TYPE_PTP:
            self._update_ptp_host_configs(context, do_apply=do_apply)
        else:
            # All other services
            personalities = [constants.CONTROLLER]
@@ -7356,6 +7474,14 @@ class ConductorManager(service.PeriodicService):
            }
            self._config_apply_runtime_manifest(context, config_uuid, config_dict)

        elif service == constants.SERVICE_TYPE_KUBERNETES:
            personalities = [constants.CONTROLLER]
            config_dict = {
                "personalities": personalities,
                "classes": ['platform::kubernetes::master::change_apiserver_parameters']
            }
            self._config_apply_runtime_manifest(context, config_uuid, config_dict)

        elif service == constants.SERVICE_TYPE_HTTP:
            # the platform::config class will be applied that will
            # configure the http port
@@ -9900,24 +10026,19 @@ class ConductorManager(service.PeriodicService):
        """Extract keys from the pem contents

        :param mode: mode one of: ssl, tpm_mode, docker_registry
        :param pem_contents: pem_contents
        :param pem_contents: pem_contents in unicode
        :param cert_format: serialization.PrivateFormat
        :param passphrase: passphrase for PEM file

        :returns: private_bytes, public_bytes, signature
        :returns: A list of {cert, private_bytes, public_bytes, signature}
        """

        temp_pem_file = constants.SSL_PEM_FILE + '.temp'
        with os.fdopen(os.open(temp_pem_file, os.O_CREAT | os.O_WRONLY,
                               constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                       'w') as f:
            f.write(pem_contents)

        if passphrase:
            passphrase = str(passphrase)

        private_bytes = None
        private_mode = False
        temp_pem_contents = pem_contents.encode("utf-8")
        if mode in [constants.CERT_MODE_SSL,
                    constants.CERT_MODE_TPM,
                    constants.CERT_MODE_DOCKER_REGISTRY,
@@ -9925,43 +10046,100 @@ class ConductorManager(service.PeriodicService):
                    ]:
            private_mode = True

        with open(temp_pem_file, "r") as key_file:
            if private_mode:
                # extract private_key with passphrase
                try:
                    private_key = serialization.load_pem_private_key(
                        key_file.read(),
                        password=passphrase,
                        backend=default_backend())
                except Exception as e:
                    raise exception.SysinvException(_("Error decrypting PEM "
                                                      "file: %s" % e))
                key_file.seek(0)
            # extract the certificate from the pem file
            cert = x509.load_pem_x509_certificate(key_file.read(),
                                                  default_backend())
        os.remove(temp_pem_file)

        if private_mode:
            # extract private_key with passphrase
            try:
                private_key = serialization.load_pem_private_key(
                    temp_pem_contents,
                    password=passphrase,
                    backend=default_backend())
            except Exception as e:
                raise exception.SysinvException(_("Error loading private key "
                                                  "from PEM data: %s" % e))

            if not isinstance(private_key, rsa.RSAPrivateKey):
                raise exception.SysinvException(_("Only RSA encryption based "
                                                  "Private Keys are supported."))
                raise exception.SysinvException(_(
                    "Only RSA encryption based Private Keys are supported."))

            private_bytes = private_key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=cert_format,
                encryption_algorithm=serialization.NoEncryption())
            try:
                private_bytes = private_key.private_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=cert_format,
                    encryption_algorithm=serialization.NoEncryption())
            except Exception as e:
                raise exception.SysinvException(_("Error loading private "
                                                  "bytes from PEM data: %s"
                                                  % e))

        signature = mode + '_' + str(cert.serial_number)
        if len(signature) > 255:
            LOG.info("Truncating certificate serial no %s" % signature)
            signature = signature[:255]
        LOG.info("config_certificate signature=%s" % signature)
        certs = cutils.extract_certs_from_pem(temp_pem_contents)
        key_list = []
        for cert in certs:
            # format=serialization.PrivateFormat.TraditionalOpenSSL,
            try:
                public_bytes = cert.public_bytes(
                    encoding=serialization.Encoding.PEM)
            except Exception as e:
                raise exception.SysinvException(_("Error loading public "
                                                  "bytes from PEM data: %s"
                                                  % e))

            # format=serialization.PrivateFormat.TraditionalOpenSSL,
            public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)
            signature = mode + '_' + str(cert.serial_number)
            if len(signature) > 255:
                LOG.info("Truncating certificate serial no %s" % signature)
                signature = signature[:255]
            LOG.info("config_certificate signature=%s" % signature)

            return private_bytes, public_bytes, signature
            key_list.append({'cert': cert,
                             'private_bytes': private_bytes,
                             'public_bytes': public_bytes,
                             'signature': signature})

        return key_list
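The hunk above changes the helper from returning a single (private_bytes, public_bytes, signature) tuple to a list of per-certificate dicts, so a PEM bundle containing several certificates can be handled. A minimal standalone sketch of the same splitting idea, using only the cryptography package; the function name is illustrative and is not the project's cutils.extract_certs_from_pem:

    from cryptography import x509
    from cryptography.hazmat.backends import default_backend

    def split_pem_certs(pem_text):
        """Split a PEM bundle into x509 objects; assumes well-formed input."""
        marker = "-----BEGIN CERTIFICATE-----"
        certs = []
        # split() drops the marker, so re-prepend it to each chunk
        for chunk in pem_text.split(marker)[1:]:
            pem = (marker + chunk).encode("utf-8")
            certs.append(x509.load_pem_x509_certificate(pem, default_backend()))
        return certs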
    @staticmethod
    def _get_public_bytes_one(key_list):
        """Get exactly one public bytes entry from key list"""

        if len(key_list) != 1:
            msg = "There should be exactly one certificate " \
                  "(ie, public_bytes) in the pem contents."
            LOG.error(msg)
            raise exception.SysinvException(_(msg))
        return key_list[0].get('public_bytes')

    @staticmethod
    def _get_private_bytes_one(key_list):
        """Get exactly one private bytes entry from key list"""

        if len(key_list) != 1:
            msg = "There should be exactly one private key " \
                  "(ie, private_bytes) in the pem contents."
            LOG.error(msg)
            raise exception.SysinvException(_(msg))
        return key_list[0].get('private_bytes')
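These two accessors let single-certificate call sites keep their old tuple-era shape while rejecting multi-cert input. A hedged usage sketch with an illustrative key list:

    # Illustrative only: a single-entry key list as produced by the extractor.
    key_list = [{'cert': None,
                 'private_bytes': b'-----BEGIN PRIVATE KEY-----...',
                 'public_bytes': b'-----BEGIN CERTIFICATE-----...',
                 'signature': 'ssl_12345'}]

    public_bytes = ConductorManager._get_public_bytes_one(key_list)  # returns bytes
    # A list with two or more entries would raise SysinvException instead.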
    @staticmethod
    def _consolidate_cert_files():
        # Cat all the cert files into one CA cert file and store it in
        # the shared directory to update system CA certs
        try:
            new_cert_files = \
                os.listdir(constants.SSL_CERT_CA_LIST_SHARED_DIR)
            with os.fdopen(
                    os.open(constants.SSL_CERT_CA_FILE_SHARED,
                            os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                            constants.CONFIG_FILE_PERMISSION_DEFAULT),
                    'wb') as f:
                for fname in new_cert_files:
                    fname = \
                        os.path.join(constants.SSL_CERT_CA_LIST_SHARED_DIR,
                                     fname)
                    with open(fname, "r") as infile:
                        f.write(infile.read())
        except Exception as e:
            msg = "Failed to consolidate cert files: %s" % str(e)
            LOG.warn(msg)
            raise exception.SysinvException(_(msg))

    def _perform_config_certificate_tpm_mode(self, context,
                                             tpm, private_bytes, public_bytes):
@@ -10027,7 +10205,7 @@ class ConductorManager(service.PeriodicService):

        LOG.info("config_certificate mode=%s" % mode)

        private_bytes, public_bytes, signature = \
        key_list = \
            self._extract_keys_from_pem(mode, pem_contents,
                                        serialization.PrivateFormat.PKCS8,
                                        passphrase)
@@ -10040,19 +10218,23 @@ class ConductorManager(service.PeriodicService):
            pass

        if mode == constants.CERT_MODE_TPM:
            private_bytes = self._get_private_bytes_one(key_list)
            public_bytes = self._get_public_bytes_one(key_list)
            self._perform_config_certificate_tpm_mode(
                context, tpm, private_bytes, public_bytes)

            file_content = public_bytes
            # copy the certificate to shared directory
            with os.fdopen(os.open(constants.SSL_PEM_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                           'wb') as f:
                f.write(file_content)

        elif mode == constants.CERT_MODE_SSL:
            config_uuid = self._config_update_hosts(context, personalities)
            private_bytes = self._get_private_bytes_one(key_list)
            public_bytes = self._get_public_bytes_one(key_list)
            file_content = private_bytes + public_bytes
            config_dict = {
                'personalities': personalities,
@@ -10065,7 +10247,7 @@ class ConductorManager(service.PeriodicService):

            # copy the certificate to shared directory
            with os.fdopen(os.open(constants.SSL_PEM_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                           'wb') as f:
                f.write(file_content)
@@ -10086,17 +10268,49 @@ class ConductorManager(service.PeriodicService):
                                                config_dict)

        elif mode == constants.CERT_MODE_SSL_CA:
            file_content = public_bytes
            # The list of the existing CA certs in sysinv DB.
            certificates = self.dbapi.certificate_get_list()
            certs_inv = [certificate.signature
                         for certificate in certificates
                         if certificate.certtype == mode]
            # The list of the actual CA certs as files in FS
            certs_file = os.listdir(constants.SSL_CERT_CA_LIST_SHARED_DIR)

            # Remove these already installed from the key list
            key_list_c = key_list[:]
            for key in key_list_c:
                if key.get('signature') in certs_inv \
                        and key.get('signature') in certs_file:
                    key_list.remove(key)

            # Save certs in files and cat them into ca-cert.pem to apply to the
            # system.
            if key_list:
                # Save each cert in a separate file with signature as its name
                try:
                    for key in key_list:
                        file_content = key.get('public_bytes')
                        file_name = \
                            os.path.join(constants.SSL_CERT_CA_LIST_SHARED_DIR,
                                         key.get('signature'))
                        with os.fdopen(
                                os.open(file_name,
                                        os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                        constants.CONFIG_FILE_PERMISSION_DEFAULT),
                                'wb') as f:
                            f.write(file_content)
                except Exception as e:
                    msg = "Failed to save cert file: %s" % str(e)
                    LOG.warn(msg)
                    raise exception.SysinvException(_(msg))

                # consolidate the CA cert files into ca-cert.pem to update
                # system CA certs.
                self._consolidate_cert_files()

            personalities = [constants.CONTROLLER,
                             constants.WORKER,
                             constants.STORAGE]
            # copy the certificate to shared directory
            with os.fdopen(os.open(constants.SSL_CERT_CA_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_DEFAULT),
                           'wb') as f:
                f.write(file_content)

            config_uuid = self._config_update_hosts(context, personalities)
            config_dict = {
                "personalities": personalities,
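The deduplication step in the ssl_ca branch above skips any CA certificate whose signature is already both recorded in the sysinv DB and present as a file on disk. An illustrative sketch of that filter with made-up signatures:

    # Illustrative values only.
    certs_inv = ['ssl_ca_111']    # signatures already known to sysinv
    certs_file = ['ssl_ca_111']   # files already in the shared CA directory
    key_list = [{'signature': 'ssl_ca_111'}, {'signature': 'ssl_ca_222'}]

    key_list = [k for k in key_list
                if not (k['signature'] in certs_inv and
                        k['signature'] in certs_file)]
    # -> only the ssl_ca_222 entry remains to be installed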
@@ -10109,10 +10323,11 @@ class ConductorManager(service.PeriodicService):
        elif mode == constants.CERT_MODE_DOCKER_REGISTRY:
            LOG.info("Docker registry certificate install")
            # docker registry requires a PKCS1 key for the token server
            pkcs1_private_bytes, pkcs1_public_bytes, pkcs1_signature = \
            key_list_pkcs1 = \
                self._extract_keys_from_pem(mode, pem_contents,
                                            serialization.PrivateFormat
                                            .TraditionalOpenSSL, passphrase)
            pkcs1_private_bytes = self._get_private_bytes_one(key_list_pkcs1)

            # install certificate, key, and pkcs1 key to controllers
            config_uuid = self._config_update_hosts(context, personalities)
@@ -10120,6 +10335,9 @@ class ConductorManager(service.PeriodicService):
            cert_path = constants.DOCKER_REGISTRY_CERT_FILE
            pkcs1_key_path = constants.DOCKER_REGISTRY_PKCS1_KEY_FILE

            private_bytes = self._get_private_bytes_one(key_list)
            public_bytes = self._get_public_bytes_one(key_list)

            config_dict = {
                'personalities': personalities,
                'file_names': [key_path, cert_path, pkcs1_key_path],
@@ -10133,17 +10351,17 @@ class ConductorManager(service.PeriodicService):

            # copy certificate to shared directory
            with os.fdopen(os.open(constants.DOCKER_REGISTRY_CERT_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                           'wb') as f:
                f.write(public_bytes)
            with os.fdopen(os.open(constants.DOCKER_REGISTRY_KEY_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                           'wb') as f:
                f.write(private_bytes)
            with os.fdopen(os.open(constants.DOCKER_REGISTRY_PKCS1_KEY_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                           'wb') as f:
                f.write(pkcs1_private_bytes)
@@ -10178,6 +10396,9 @@ class ConductorManager(service.PeriodicService):
            config_uuid = self._config_update_hosts(context, personalities)
            key_path = constants.OPENSTACK_CERT_KEY_FILE
            cert_path = constants.OPENSTACK_CERT_FILE
            private_bytes = self._get_private_bytes_one(key_list)
            public_bytes = self._get_public_bytes_one(key_list)

            config_dict = {
                'personalities': personalities,
                'file_names': [key_path, cert_path],
@@ -10192,12 +10413,12 @@ class ConductorManager(service.PeriodicService):
                os.makedirs(constants.CERT_OPENSTACK_SHARED_DIR)
            # copy the certificate to shared directory
            with os.fdopen(os.open(constants.OPENSTACK_CERT_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                           'wb') as f:
                f.write(public_bytes)
            with os.fdopen(os.open(constants.OPENSTACK_CERT_KEY_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                           'wb') as f:
                f.write(private_bytes)
@@ -10214,7 +10435,9 @@ class ConductorManager(service.PeriodicService):

        elif mode == constants.CERT_MODE_OPENSTACK_CA:
            config_uuid = self._config_update_hosts(context, personalities)
            file_content = public_bytes
            file_content = ''
            for key in key_list:
                file_content += key.get('public_bytes', '')
            config_dict = {
                'personalities': personalities,
                'file_names': [constants.OPENSTACK_CERT_CA_FILE],
@@ -10225,7 +10448,7 @@ class ConductorManager(service.PeriodicService):

            # copy the certificate to shared directory
            with os.fdopen(os.open(constants.OPENSTACK_CERT_CA_FILE_SHARED,
                                   os.O_CREAT | os.O_WRONLY,
                                   os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                                   constants.CONFIG_FILE_PERMISSION_DEFAULT),
                           'wb') as f:
                f.write(file_content)
@@ -10244,7 +10467,14 @@ class ConductorManager(service.PeriodicService):
            LOG.warn(msg)
            raise exception.SysinvException(_(msg))

        return signature
        inv_certs = []
        for key in key_list:
            inv_cert = {'signature': key.get('signature'),
                        'not_valid_before': key.get('cert').not_valid_before,
                        'not_valid_after': key.get('cert').not_valid_after}
            inv_certs.append(inv_cert)

        return inv_certs

    def _config_selfsigned_certificate(self, context):
        """
@@ -10264,7 +10494,7 @@ class ConductorManager(service.PeriodicService):

        LOG.info("_config_selfsigned_certificate mode=%s file=%s" % (mode, certificate_file))

        private_bytes, public_bytes, signature = \
        key_list = \
            self._extract_keys_from_pem(mode, pem_contents,
                                        serialization.PrivateFormat.PKCS8,
                                        passphrase)
@@ -10272,6 +10502,8 @@ class ConductorManager(service.PeriodicService):
        personalities = [constants.CONTROLLER]

        config_uuid = self._config_update_hosts(context, personalities)
        private_bytes = self._get_private_bytes_one(key_list)
        public_bytes = self._get_public_bytes_one(key_list)
        file_content = private_bytes + public_bytes
        config_dict = {
            'personalities': personalities,
@@ -10284,12 +10516,54 @@ class ConductorManager(service.PeriodicService):

        # copy the certificate to shared directory
        with os.fdopen(os.open(constants.SSL_PEM_FILE_SHARED,
                               os.O_CREAT | os.O_WRONLY,
                               os.O_CREAT | os.O_TRUNC | os.O_WRONLY,
                               constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
                       'wb') as f:
            f.write(file_content)

        return signature
        return key_list[0].get('signature')

    def delete_certificate(self, context, mode, signature):
        """Delete a certificate by its mode and signature.

        :param context: an admin context.
        :param mode: the mode of the certificate
        :param signature: the signature of the certificate.

        Currently only ssl_ca cert can be deleted.
        """
        LOG.info("delete_certificate mode=%s, signature=%s" %
                 (mode, signature))

        if mode == constants.CERT_MODE_SSL_CA:
            try:
                cert_file = \
                    os.path.join(constants.SSL_CERT_CA_LIST_SHARED_DIR,
                                 signature)
                os.remove(cert_file)
            except Exception as e:
                msg = "Failed to delete cert file: %s" % str(e)
                LOG.warn(msg)
                raise exception.SysinvException(_(msg))

            self._consolidate_cert_files()

            personalities = [constants.CONTROLLER,
                             constants.WORKER,
                             constants.STORAGE]
            config_uuid = self._config_update_hosts(context, personalities)
            config_dict = {
                "personalities": personalities,
                "classes": ['platform::config::runtime']
            }
            self._config_apply_runtime_manifest(context,
                                                config_uuid,
                                                config_dict,
                                                force=True)
        else:
            msg = "delete_certificate unsupported mode=%s" % mode
            LOG.error(msg)
            raise exception.SysinvException(_(msg))

    def get_helm_chart_namespaces(self, context, chart_name):
        """Get supported chart namespaces.
@@ -749,12 +749,13 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
        """
        return self.call(context, self.make_msg('update_ntp_config'))

    def update_ptp_config(self, context):
    def update_ptp_config(self, context, do_apply=False):
        """Synchronously, have the conductor update the PTP configuration.

        :param context: request context.
        :param do_apply: If the config should be applied via runtime manifests
        """
        return self.call(context, self.make_msg('update_ptp_config'))
        return self.call(context, self.make_msg('update_ptp_config', do_apply=do_apply))
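A hedged caller sketch for the new parameter; the construction of the proxy is illustrative and elides any topic/version arguments the real class may take:

    # Request an immediate runtime apply of the PTP manifests instead of
    # only flagging the hosts' config as out-of-date.
    rpcapi = ConductorAPI()
    rpcapi.update_ptp_config(context, do_apply=True)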
    def update_system_mode_config(self, context):
        """Synchronously, have the conductor update the system mode
@@ -1571,6 +1572,20 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
                                       config_dict=config_dict,
                                       ))

    def delete_certificate(self, context, mode, signature):
        """Synchronously, have the conductor delete the certificate.

        :param context: request context.
        :param mode: the mode of the certificate
        :param signature: the signature of the certificate.

        """
        return self.call(context,
                         self.make_msg('delete_certificate',
                                       mode=mode,
                                       signature=signature,
                                       ))
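A hedged usage sketch; the signature value is made up, and per the conductor-side docstring only the ssl_ca mode is accepted:

    # Remove a trusted CA certificate by the signature recorded at install.
    rpcapi.delete_certificate(context, mode='ssl_ca',
                              signature='ssl_ca_123456789')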
    def get_helm_chart_namespaces(self, context, chart_name):
        """Get supported chart namespaces.
@@ -33,9 +33,9 @@ from oslo_log import log

LOG = log.getLogger(__name__)


_BACKEND_MAPPING = {'sqlalchemy': 'sysinv.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
IMPL = db_api.DBAPI.from_config(cfg.CONF,
                                backend_mapping=_BACKEND_MAPPING,
                                lazy=True)
@@ -51,9 +51,6 @@ from sysinv.db import api
from sysinv.db.sqlalchemy import models

CONF = cfg.CONF
CONF.import_opt('connection',
                'sysinv.openstack.common.db.sqlalchemy.session',
                group='database')
CONF.import_opt('journal_min_size',
                'sysinv.api.controllers.v1.storage',
                group='journal')
@@ -350,17 +347,11 @@ def add_interface_filter(query, value):
    :return: Modified query.
    """
    if utils.is_valid_mac(value):
        return query.filter(or_(models.EthernetInterfaces.imac == value,
                                models.AeInterfaces.imac == value,
                                models.VlanInterfaces.imac == value))
        return query.filter(models.Interfaces.imac == value)
    elif uuidutils.is_uuid_like(value):
        return query.filter(or_(models.EthernetInterfaces.uuid == value,
                                models.AeInterfaces.uuid == value,
                                models.VlanInterfaces.uuid == value))
        return query.filter(models.Interfaces.uuid == value)
    elif utils.is_int_like(value):
        return query.filter(or_(models.EthernetInterfaces.id == value,
                                models.AeInterfaces.id == value,
                                models.VlanInterfaces.id == value))
        return query.filter(models.Interfaces.id == value)
    else:
        return add_identity_filter(query, value, use_ifname=True)
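With the interface models consolidated onto a single polymorphic Interfaces table, one filter expression replaces the old three-way or_() across the ethernet/ae/vlan subclasses. An illustrative call, with made-up lookup values:

    # Each call narrows the query by whichever identifier shape is given.
    query = add_interface_filter(query, '02:42:ac:11:00:02')  # by MAC
    query = add_interface_filter(query, 42)                   # by numeric id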
@@ -9,7 +9,7 @@ from eventlet.green import subprocess
import json
import tsconfig.tsconfig as tsconfig
from migrate.changeset import UniqueConstraint
from sqlalchemy import Boolean, DateTime, Enum, Integer, String, Text
from sqlalchemy import Boolean, DateTime, Integer, String, Text
from sqlalchemy import Column, ForeignKey, MetaData, Table
from sqlalchemy.dialects import postgresql
@@ -101,17 +101,26 @@ def upgrade(migrate_engine):
                      primary_key=True, nullable=False),
                      mysql_engine=ENGINE, mysql_charset=CHARSET,
                      autoload=True)

    if migrate_engine.url.get_dialect() is postgresql.dialect:
        old_serviceEnum = Enum('identity',
                               'horizon',
                               'ceph',
                               'network',
                               name='serviceEnum')

        service_col = service_parameter.c.service
        service_col.alter(Column('service', String(16)))
        old_serviceEnum.drop(bind=migrate_engine, checkfirst=False)
    service_parameter.drop()
    meta.remove(service_parameter)
    service_parameter = Table(
        'service_parameter',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('service', String(16)),
        Column('section', String(255)),
        Column('name', String(255)),
        Column('value', String(255)),
        UniqueConstraint('service', 'section', 'name',
                         name='u_servicesectionname'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    service_parameter.create(migrate_engine, checkfirst=False)

    # 049_add_controllerfs_scratch.py
    controller_fs = Table('controller_fs', meta, autoload=True)
@@ -30,7 +30,9 @@ from migrate.versioning.repository import Repository

_REPOSITORY = None

get_engine = enginefacade.get_legacy_facade().get_engine

def get_engine():
    return enginefacade.get_legacy_facade().get_engine()
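The module-level assignment resolved the enginefacade (and therefore the database configuration) at import time; wrapping it in a function defers that work until a caller actually needs an engine. A hedged sketch of the two shapes:

    from oslo_db.sqlalchemy import enginefacade

    # Eager: the facade is built as a side effect of importing the module.
    engine_attr = enginefacade.get_legacy_facade().get_engine

    # Lazy: nothing touches the facade (or DB config) until first call.
    def engine_fn():
        return enginefacade.get_legacy_facade().get_engine()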

def db_sync(version=None):
@@ -149,6 +149,29 @@ class BaseHelm(object):
            self.context['_system_controller_floating_address'] = sc_float_ip
        return sc_float_ip

    def _is_ipv6_cluster_service(self):
        if self.dbapi is None:
            return False

        is_ipv6_cluster_service = self.context.get(
            '_is_ipv6_cluster_service', None)

        if is_ipv6_cluster_service is None:
            try:
                cluster_service_network = self.dbapi.network_get_by_type(
                    constants.NETWORK_TYPE_CLUSTER_SERVICE)
                cluster_service_network_addr_pool = self.dbapi.address_pool_get(
                    cluster_service_network.pool_uuid)
                is_ipv6_cluster_service = (
                    cluster_service_network_addr_pool.family ==
                    constants.IPV6_FAMILY)
            except exception.NetworkTypeNotFound:
                LOG.error("No Cluster Service Network Type found")
                raise

        self.context['_is_ipv6_cluster_service'] = is_ipv6_cluster_service
        return is_ipv6_cluster_service

    def _region_name(self):
        """Returns the local region name of the system"""
        if self.dbapi is None:
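The new _is_ipv6_cluster_service() in the hunk above memoizes a per-plugin database lookup in the shared override context. A minimal sketch of that caching pattern, with illustrative names:

    # Compute once from the DB, then reuse the value stashed in self.context.
    def _cached(self, key, compute):
        value = self.context.get(key, None)
        if value is None:
            value = compute()          # e.g. query the address pool family
            self.context[key] = value
        return value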
@@ -25,7 +25,7 @@ class Dex(DexBaseHelm):

        oidc_client = {
            'id': self._get_client_id(),
            'redirectURIs': ["http://%s:%s/callback" %
            'redirectURIs': ["https://%s:%s/callback" %
                             (self._format_url_address(self._get_oam_address()), self.OIDC_CLIENT_NODE_PORT)],
            'name': 'STX OIDC Client app',
            'secret': self._get_client_secret()
@@ -4,7 +4,6 @@
# SPDX-License-Identifier: Apache-2.0
#

import re
from sysinv.helm import base
from sysinv.helm import common
@@ -63,22 +62,11 @@ class ElasticBaseHelm(base.BaseHelm):

    def get_system_info_overrides(self):
        # Get the system name and system uuid from the database
        # for use in setting overrides. Also returns a massaged
        # version of the system name for use in elasticsearch index,
        # and beats templates.
        #
        # Since the system_name_for_index is used as the index name
        # in elasticsearch, in the beats templates, and in also in the url
        # setting up the templates, we must be fairly restrictive here.
        # The Helm Chart repeats this same regular expression substitution,
        # but we perform it here as well so the user can see what is being used
        # when looking at the overrides.

        # for use in setting overrides.
        system = self.dbapi.isystem_get_one()

        system_name = system.name.encode('utf8', 'strict')
        system_uuid = system.uuid.encode('utf8', 'strict')
        system_name_for_index = re.sub('[^A-Za-z0-9-]+', '', system_name.lower())

        # fields must be set to a non-empty value.
        if not system_name:

@@ -88,4 +76,4 @@ class ElasticBaseHelm(base.BaseHelm):
            "uid": system_uuid,
        }

        return system_fields, system_name_for_index
        return system_fields
@@ -21,13 +21,18 @@ class ElasticsearchClientHelm(elastic.ElasticBaseHelm):
        if utils.is_aio_simplex_system(self.dbapi):
            replicas = 1

        if self._is_ipv6_cluster_service():
            ipv6JavaOpts = "-Djava.net.preferIPv6Addresses=true "
        else:
            ipv6JavaOpts = ""

        if (utils.is_aio_system(self.dbapi) and not
                self._is_distributed_cloud_role_system_controller()):
            esJavaOpts = \
                "-Djava.net.preferIPv6Addresses=true -Xmx512m -Xms512m"
                ipv6JavaOpts + "-Xmx512m -Xms512m"
        else:
            esJavaOpts = \
                "-Djava.net.preferIPv6Addresses=true -Xmx1024m -Xms1024m"
                ipv6JavaOpts + "-Xmx1024m -Xms1024m"

        overrides = {
            common.HELM_NS_MONITOR: {
@@ -31,13 +31,19 @@ class ElasticsearchDataHelm(elastic.ElasticBaseHelm):

        if utils.is_aio_simplex_system(self.dbapi):
            replicas = 1

        if self._is_ipv6_cluster_service():
            ipv6JavaOpts = "-Djava.net.preferIPv6Addresses=true "
        else:
            ipv6JavaOpts = ""

        if (utils.is_aio_system(self.dbapi) and not
                self._is_distributed_cloud_role_system_controller()):
            esJavaOpts = \
                "-Djava.net.preferIPv6Addresses=true -Xmx1536m -Xms1536m"
                ipv6JavaOpts + "-Xmx1536m -Xms1536m"
        else:
            esJavaOpts = \
                "-Djava.net.preferIPv6Addresses=true -Xmx4096m -Xms4096m"
                ipv6JavaOpts + "-Xmx4096m -Xms4096m"

        overrides = {
            common.HELM_NS_MONITOR: {
@@ -29,11 +29,16 @@ class ElasticsearchMasterHelm(elastic.ElasticBaseHelm):
        # pods will be master capable to form a cluster of 3 masters.
        replicas = 1

        if self._is_ipv6_cluster_service():
            ipv6JavaOpts = "-Djava.net.preferIPv6Addresses=true "
        else:
            ipv6JavaOpts = ""

        if (utils.is_aio_system(self.dbapi) and not
                self._is_distributed_cloud_role_system_controller()):
            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx256m -Xms256m"
            esJavaOpts = ipv6JavaOpts + "-Xmx256m -Xms256m"
        else:
            esJavaOpts = "-Djava.net.preferIPv6Addresses=true -Xmx512m -Xms512m"
            esJavaOpts = ipv6JavaOpts + "-Xmx512m -Xms512m"

        overrides = {
            common.HELM_NS_MONITOR: {
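Across the three Elasticsearch charts the JVM option string is now composed from an IPv6 preference prefix plus the heap sizing, instead of hard-coding the IPv6 flag. An illustrative sketch of the resulting values for the master chart:

    ipv6 = True   # i.e. _is_ipv6_cluster_service() returned True
    ipv6JavaOpts = "-Djava.net.preferIPv6Addresses=true " if ipv6 else ""
    aio = True    # AIO, not a distributed-cloud system controller
    esJavaOpts = ipv6JavaOpts + ("-Xmx256m -Xms256m" if aio else "-Xmx512m -Xms512m")
    # ipv6 cluster -> "-Djava.net.preferIPv6Addresses=true -Xmx256m -Xms256m"
    # ipv4 cluster -> "-Xmx256m -Xms256m"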
@@ -15,11 +15,10 @@ class FilebeatHelm(elastic.ElasticBaseHelm):
    CHART = common.HELM_CHART_FILEBEAT

    def get_overrides(self, namespace=None):
        system_fields, system_name_for_index = self.get_system_info_overrides()
        system_fields = self.get_system_info_overrides()
        overrides = {
            common.HELM_NS_MONITOR: {
                'config': self._get_config_overrides(system_fields),
                'systemNameForIndex': system_name_for_index,
                'resources': self._get_resources_overrides(),
            }
        }

@@ -49,7 +48,8 @@ class FilebeatHelm(elastic.ElasticBaseHelm):
                    "/var/log/syslog",
                    "/var/log/**/*.log"
                ],
                'type': "log"
                'type': "log",
                'close_timeout': "5m"
            }
        ]
@@ -19,7 +19,6 @@ class LogstashHelm(elastic.ElasticBaseHelm):
    CHART = common.HELM_CHART_LOGSTASH

    def get_overrides(self, namespace=None):
        system_fields, system_name_for_index = self.get_system_info_overrides()
        if utils.is_aio_simplex_system(self.dbapi):
            replicas = 1
        else:

@@ -30,7 +29,6 @@ class LogstashHelm(elastic.ElasticBaseHelm):
                'replicaCount': replicas,
                'resources': self._get_resources_overrides(),
                'config': self._get_config(),
                'systemNameForIndex': system_name_for_index,
            }
        }
@@ -15,7 +15,7 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
    CHART = common.HELM_CHART_METRICBEAT

    def get_overrides(self, namespace=None):
        system_fields, system_name_for_index = self.get_system_info_overrides()
        system_fields = self.get_system_info_overrides()
        overrides = {
            common.HELM_NS_MONITOR: {
                'systemName': '',

@@ -33,8 +33,7 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
                        self._get_metric_deployment_kubernetes()
                },
                'config': self._get_config_overrides(system_fields),
                },
                'systemNameForIndex': system_name_for_index,
            }
        }
    }

@@ -89,6 +88,10 @@ class MetricbeatHelm(elastic.ElasticBaseHelm):
                    "load",
                    "memory",
                    "process_summary",
                ],
                "cpu.metrics": [
                    "percentages",
                    "normalized_percentages"
                ]
            },
            {
@@ -202,8 +202,12 @@ class NovaHelm(openstack.OpenstackBaseHelm):
            location = "%s.%s" % (self.NOVNCPROXY_SERVICE_NAME,
                                  str(endpoint_domain.value).lower())
        else:
            location = "%s:%s" % (self._get_oam_address(),
                                  self.NOVNCPROXY_NODE_PORT)
            if self._is_ipv6_cluster_service():
                location = "[%s]:%s" % (self._get_oam_address(),
                                        self.NOVNCPROXY_NODE_PORT)
            else:
                location = "%s:%s" % (self._get_oam_address(),
                                      self.NOVNCPROXY_NODE_PORT)
        url = "%s://%s/vnc_auto.html" % (self._get_public_protocol(),
                                         location)
        return url
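The branch added above exists because an IPv6 literal must be wrapped in brackets when embedded in a URL authority (RFC 3986); without them the colons of the address are ambiguous with the port separator. An illustrative sketch with made-up values:

    oam, port = "fd00::2", 30680           # example address and node port
    location = "[%s]:%s" % (oam, port)     # -> "[fd00::2]:30680"
    url = "https://%s/vnc_auto.html" % location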
@@ -388,7 +392,7 @@ class NovaHelm(openstack.OpenstackBaseHelm):
        cluster_host_ip = None
        ip_family = None
        for addr in addresses:
            if addr.interface_uuid == cluster_host_iface.uuid:
            if addr.interface_id == cluster_host_iface.id:
                cluster_host_ip = addr.address
                ip_family = addr.family
@@ -29,8 +29,8 @@ class OidcClientHelm(DexBaseHelm):
            'client_secret': self._get_client_secret(),
            'issuer': "https://%s:%s/dex" % (oam_url, self.DEX_NODE_PORT),
            'issuer_root_ca': '/home/dex-ca.pem',
            'listen': 'http://0.0.0.0:5555',
            'redirect_uri': "http://%s:%s/callback" % (oam_url, self.OIDC_CLIENT_NODE_PORT),
            'listen': 'https://0.0.0.0:5555',
            'redirect_uri': "https://%s:%s/callback" % (oam_url, self.OIDC_CLIENT_NODE_PORT),
        },
        'service': {
            'nodePort': self.OIDC_CLIENT_NODE_PORT
@@ -51,6 +51,9 @@ class RabbitmqHelm(openstack.OpenstackBaseHelm):
                'size': "%d" % (io_thread_pool_size)
            },
            'endpoints': self._get_endpoints_overrides(),
            'manifests': {
                'config_ipv6': self._is_ipv6_cluster_service()
            }
        }
    }
@@ -22,7 +22,7 @@ class Address(base.SysinvObject):
    fields = {'id': int,
              'uuid': utils.uuid_or_none,
              'forihostid': utils.int_or_none,
              'interface_uuid': utils.uuid_or_none,
              'interface_id': utils.int_or_none,
              'pool_uuid': utils.uuid_or_none,
              'ifname': utils.str_or_none,
              'family': utils.int_or_none,

@@ -32,7 +32,7 @@ class Address(base.SysinvObject):
              'name': utils.str_or_none,
              }

    _foreign_fields = {'interface_uuid': 'interface:uuid',
    _foreign_fields = {'interface_id': 'interface:id',
                       'pool_uuid': 'address_pool:uuid',
                       'ifname': 'interface:ifname',
                       'forihostid': 'interface:forihostid'}
@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,57 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""DB related custom exceptions."""

from sysinv._i18n import _


class DBError(Exception):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))


class DBDuplicateEntry(DBError):
    """Wraps an implementation specific exception."""
    def __init__(self, columns=None, inner_exception=None):
        if columns is None:
            self.columns = []
        else:
            self.columns = columns
        super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)


class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")


class DbMigrationError(DBError):
    """Wraps migration specific exception."""
    def __init__(self, message=None):
        super(DbMigrationError, self).__init__(str(message))


class DBConnectionError(DBError):
    """Wraps connection specific exception."""
    pass
@@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,720 +0,0 @@
|
|||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Session Handling for SQLAlchemy backend.
|
||||
|
||||
Initializing:
|
||||
|
||||
* Call set_defaults with the minimal of the following kwargs:
|
||||
sql_connection, sqlite_db
|
||||
|
||||
Example:
|
||||
|
||||
session.set_defaults(
|
||||
sql_connection="sqlite:///var/lib/sysinv.sqlite.db",
|
||||
sqlite_db="/var/lib/sysinv/sqlite.db")
|
||||
|
||||
Recommended ways to use sessions within this framework:
|
||||
|
||||
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
|
||||
model_query() will implicitly use a session when called without one
|
||||
supplied. This is the ideal situation because it will allow queries
|
||||
to be automatically retried if the database connection is interrupted.
|
||||
|
||||
Note: Automatic retry will be enabled in a future patch.
|
||||
|
||||
It is generally fine to issue several queries in a row like this. Even though
|
||||
they may be run in separate transactions and/or separate sessions, each one
|
||||
will see the data from the prior calls. If needed, undo- or rollback-like
|
||||
functionality should be handled at a logical level. For an example, look at
|
||||
the code around quotas and reservation_rollback().
|
||||
|
||||
Examples:
|
||||
|
||||
def get_foo(context, foo):
|
||||
return model_query(context, models.Foo).\
|
||||
filter_by(foo=foo).\
|
||||
first()
|
||||
|
||||
def update_foo(context, id, newfoo):
|
||||
model_query(context, models.Foo).\
|
||||
filter_by(id=id).\
|
||||
update({'foo': newfoo})
|
||||
|
||||
def create_foo(context, values):
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(values)
|
||||
foo_ref.save()
|
||||
return foo_ref
|
||||
|
||||
|
||||
* Within the scope of a single method, keeping all the reads and writes within
|
||||
the context managed by a single session. In this way, the session's __exit__
|
||||
handler will take care of calling flush() and commit() for you.
|
||||
If using this approach, you should not explicitly call flush() or commit().
|
||||
Any error within the context of the session will cause the session to emit
|
||||
a ROLLBACK. If the connection is dropped before this is possible, the
|
||||
database will implicitly rollback the transaction.
|
||||
|
||||
Note: statements in the session scope will not be automatically retried.
|
||||
|
||||
If you create models within the session, they need to be added, but you
|
||||
do not need to call model.save()
|
||||
|
||||
def create_many_foo(context, foos):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
for foo in foos:
|
||||
foo_ref = models.Foo()
|
||||
foo_ref.update(foo)
|
||||
session.add(foo_ref)
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
foo_ref = model_query(context, models.Foo, session).\
|
||||
filter_by(id=foo_id).\
|
||||
first()
|
||||
model_query(context, models.Bar, session).\
|
||||
filter_by(id=foo_ref['bar_id']).\
|
||||
update({'bar': newbar})
|
||||
|
||||
Note: update_bar is a trivially simple example of using "with session.begin".
|
||||
Whereas create_many_foo is a good example of when a transaction is needed,
|
||||
it is always best to use as few queries as possible. The two queries in
|
||||
update_bar can be better expressed using a single query which avoids
|
||||
the need for an explicit transaction. It can be expressed like so:
|
||||
|
||||
def update_bar(context, foo_id, newbar):
|
||||
subq = model_query(context, models.Foo.id).\
|
||||
filter_by(id=foo_id).\
|
||||
limit(1).\
|
||||
subquery()
|
||||
model_query(context, models.Bar).\
|
||||
filter_by(id=subq.as_scalar()).\
|
||||
update({'bar': newbar})
|
||||
|
||||
For reference, this emits approximagely the following SQL statement:
|
||||
|
||||
UPDATE bar SET bar = ${newbar}
|
||||
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
|
||||
|
||||
* Passing an active session between methods. Sessions should only be passed
|
||||
to private methods. The private method must use a subtransaction; otherwise
|
||||
SQLAlchemy will throw an error when you call session.begin() on an existing
|
||||
transaction. Public methods should not accept a session parameter and should
|
||||
not be involved in sessions within the caller's scope.
|
||||
|
||||
Note that this incurs more overhead in SQLAlchemy than the above means
|
||||
due to nesting transactions, and it is not possible to implicitly retry
|
||||
failed database operations when using this approach.
|
||||
|
||||
This also makes code somewhat more difficult to read and debug, because a
|
||||
single database transaction spans more than one method. Error handling
|
||||
becomes less clear in this situation. When this is needed for code clarity,
|
||||
it should be clearly documented.
|
||||
|
||||
def myfunc(foo):
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
# do some database things
|
||||
bar = _private_func(foo, session)
|
||||
return bar
|
||||
|
||||
def _private_func(foo, session=None):
|
||||
if not session:
|
||||
session = get_session()
|
||||
with session.begin(subtransaction=True):
|
||||
# do some other database things
|
||||
return bar
|
||||
|
||||
|
||||
There are some things which it is best to avoid:
|
||||
|
||||
* Don't keep a transaction open any longer than necessary.
|
||||
|
||||
This means that your "with session.begin()" block should be as short
|
||||
as possible, while still containing all the related calls for that
|
||||
transaction.
|
||||
|
||||
* Avoid "with_lockmode('UPDATE')" when possible.
|
||||
|
||||
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
|
||||
any rows, it will take a gap-lock. This is a form of write-lock on the
|
||||
"gap" where no rows exist, and prevents any other writes to that space.
|
||||
This can effectively prevent any INSERT into a table by locking the gap
|
||||
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
|
||||
has an overly broad WHERE clause, or doesn't properly use an index.
|
||||
|
||||
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
|
||||
number of rows matching a query, and if only one row is returned,
|
||||
then issue the SELECT FOR UPDATE.
|
||||
|
||||
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
|
||||
However, this can not be done until the "deleted" columns are removed and
|
||||
proper UNIQUE constraints are added to the tables.
|
||||
|
||||
|
||||
Enabling soft deletes:
|
||||
|
||||
* To use/enable soft-deletes, the SoftDeleteMixin must be added
|
||||
to your model class. For example:
|
||||
|
||||
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
|
||||
pass
|
||||
|
||||
|
||||
Efficient use of soft deletes:
|
||||
|
||||
* There are two possible ways to mark a record as deleted:
|
||||
model.soft_delete() and query.soft_delete().
|
||||
|
||||
model.soft_delete() method works with single already fetched entry.
|
||||
query.soft_delete() makes only one db request for all entries that correspond
|
||||
to query.
|
||||
|
||||
* In almost all cases you should use query.soft_delete(). Some examples:
|
||||
|
||||
def soft_delete_bar():
|
||||
count = model_query(BarModel).find(some_condition).soft_delete()
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
def complex_soft_delete_with_synchronization_bar(session=None):
|
||||
if session is None:
|
||||
session = get_session()
|
||||
with session.begin(subtransactions=True):
|
||||
count = model_query(BarModel).\
|
||||
find(some_condition).\
|
||||
soft_delete(synchronize_session=True)
|
||||
# Here synchronize_session is required, because we
|
||||
# don't know what is going on in outer session.
|
||||
if count == 0:
|
||||
raise Exception("0 entries were soft deleted")
|
||||
|
||||
* There is only one situation where model.soft_delete() is appropriate: when
|
||||
you fetch a single record, work with it, and mark it as deleted in the same
|
||||
transaction.
|
||||
|
||||
def soft_delete_bar_model():
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
bar_ref = model_query(BarModel).find(some_condition).first()
|
||||
# Work with bar_ref
|
||||
bar_ref.soft_delete(session=session)
|
||||
|
||||
However, if you need to work with all entries that correspond to query and
|
||||
then soft delete them you should use query.soft_delete() method:
|
||||
|
||||
def soft_delete_multi_models():
|
||||
session = get_session()
|
||||
with session.begin():
|
||||
query = model_query(BarModel, session=session).\
|
||||
find(some_condition)
|
||||
model_refs = query.all()
|
||||
# Work with model_refs
|
||||
query.soft_delete(synchronize_session=False)
|
||||
# synchronize_session=False should be set if there is no outer
|
||||
# session and these entries are not used after this.
|
||||
|
||||
When working with many rows, it is very important to use query.soft_delete,
|
||||
which issues a single query. Using model.soft_delete(), as in the following
|
||||
example, is very inefficient.
|
||||
|
||||
for bar_ref in bar_refs:
|
||||
bar_ref.soft_delete(session=session)
|
||||
# This will produce count(bar_refs) db requests.
|
||||
"""
|
||||
|
||||
import os.path
|
||||
import re
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
from eventlet import greenthread
|
||||
from eventlet.green import threading
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
from sqlalchemy import exc as sqla_exc
|
||||
import sqlalchemy.interfaces
|
||||
from sqlalchemy.interfaces import PoolListener
|
||||
import sqlalchemy.orm
|
||||
from sqlalchemy.pool import NullPool, StaticPool
|
||||
from sqlalchemy.sql.expression import literal_column
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import timeutils
|
||||
from sysinv._i18n import _
|
||||
from sysinv.openstack.common.db import exception
|
||||
|
||||
DEFAULT = 'DEFAULT'
|
||||
|
||||
sqlite_db_opts = [
|
||||
cfg.StrOpt('sqlite_db',
|
||||
default='sysinv.sqlite',
|
||||
help='the filename to use with sqlite'),
|
||||
cfg.BoolOpt('sqlite_synchronous',
|
||||
default=True,
|
||||
help='If true, use synchronous mode for sqlite'),
|
||||
]
|
||||
|
||||
database_opts = [
|
||||
cfg.StrOpt('connection',
|
||||
default='sqlite:///' +
|
||||
os.path.abspath(os.path.join(os.path.dirname(__file__),
|
||||
'../', '$sqlite_db')),
|
||||
help='The SQLAlchemy connection string used to connect to the '
|
||||
'database',
|
||||
deprecated_name='sql_connection',
|
||||
deprecated_group=DEFAULT,
|
||||
secret=True),
|
||||
cfg.IntOpt('idle_timeout',
|
||||
default=3600,
|
||||
deprecated_name='sql_idle_timeout',
|
||||
deprecated_group=DEFAULT,
|
||||
help='timeout before idle sql connections are reaped'),
|
||||
cfg.IntOpt('min_pool_size',
|
||||
default=1,
|
||||
deprecated_name='sql_min_pool_size',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Minimum number of SQL connections to keep open in a '
|
||||
'pool'),
|
||||
cfg.IntOpt('max_pool_size',
|
||||
default=50,
|
||||
deprecated_name='sql_max_pool_size',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Maximum number of SQL connections to keep open in a '
|
||||
'pool'),
|
||||
cfg.IntOpt('max_retries',
|
||||
default=10,
|
||||
deprecated_name='sql_max_retries',
|
||||
deprecated_group=DEFAULT,
|
||||
help='maximum db connection retries during startup. '
|
||||
'(setting -1 implies an infinite retry count)'),
|
||||
cfg.IntOpt('retry_interval',
|
||||
default=10,
|
||||
deprecated_name='sql_retry_interval',
|
||||
deprecated_group=DEFAULT,
|
||||
help='interval between retries of opening a sql connection'),
|
||||
cfg.IntOpt('max_overflow',
|
||||
default=100,
|
||||
deprecated_name='sql_max_overflow',
|
||||
deprecated_group=DEFAULT,
|
||||
help='If set, use this value for max_overflow with sqlalchemy'),
|
||||
cfg.IntOpt('connection_debug',
|
||||
default=0,
|
||||
deprecated_name='sql_connection_debug',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Verbosity of SQL debugging information. 0=None, '
|
||||
'100=Everything'),
|
||||
cfg.BoolOpt('connection_trace',
|
||||
default=False,
|
||||
deprecated_name='sql_connection_trace',
|
||||
deprecated_group=DEFAULT,
|
||||
help='Add python stack traces to SQL as comment strings'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(sqlite_db_opts)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
if not hasattr(CONF.database, 'connection'):
|
||||
CONF.register_opts(database_opts, 'database')
|
||||
|
||||
|
||||
_ENGINE = None
|
||||
_MAKER = None
|
||||
|
||||
|
||||
def set_defaults(sql_connection, sqlite_db):
|
||||
"""Set defaults for configuration variables."""
|
||||
cfg.set_defaults(database_opts,
|
||||
connection=sql_connection)
|
||||
cfg.set_defaults(sqlite_db_opts,
|
||||
sqlite_db=sqlite_db)
|
||||
|
||||
|
||||
def cleanup():
|
||||
global _ENGINE, _MAKER
|
||||
|
||||
if _MAKER:
|
||||
_MAKER.close_all() # pylint: disable=no-member
|
||||
_MAKER = None
|
||||
if _ENGINE:
|
||||
_ENGINE.dispose()
|
||||
_ENGINE = None
|
||||
|
||||
|
||||
class SqliteForeignKeysListener(PoolListener):
|
||||
"""
|
||||
Ensures that the foreign key constraints are enforced in SQLite.
|
||||
|
||||
The foreign key constraints are disabled by default in SQLite,
|
||||
so the foreign key constraints will be enabled here for every
|
||||
database connection
|
||||
"""
|
||||
def connect(self, dbapi_con, con_record):
|
||||
dbapi_con.execute('pragma foreign_keys=ON')
|
||||
|
||||
|
||||
def get_session(autocommit=True, expire_on_commit=False,
|
||||
sqlite_fk=False):
|
||||
"""Return a greenthread scoped SQLAlchemy session."""
|
||||
|
||||
if _ENGINE is None:
|
||||
engine = get_engine(sqlite_fk=sqlite_fk)
|
||||
|
||||
engine = _ENGINE
|
||||
scoped_session = get_maker(engine, autocommit, expire_on_commit)
|
||||
|
||||
LOG.debug("get_session scoped_session=%s" % (scoped_session))
|
||||
return scoped_session
|
||||
|
||||
|
||||
# note(boris-42): In current versions of DB backends unique constraint
|
||||
# violation messages follow the structure:
|
||||
#
|
||||
# sqlite:
|
||||
# 1 column - (IntegrityError) column c1 is not unique
|
||||
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
|
||||
#
|
||||
# postgres:
|
||||
# 1 column - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "users_c1_key"
|
||||
# N columns - (IntegrityError) duplicate key value violates unique
|
||||
# constraint "name_of_our_constraint"
|
||||
#
|
||||
# mysql:
|
||||
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
|
||||
# 'c1'")
|
||||
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
|
||||
# with -' for key 'name_of_our_constraint'")
|
||||
_DUP_KEY_RE_DB = {
|
||||
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
|
||||
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
|
||||
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
|
||||
}


def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """
    Raise a DBDuplicateEntry exception if the IntegrityError wraps a
    unique constraint violation.
    """

    def get_columns_from_uniq_cons_or_name(columns):
        # note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3"
        # means that columns c1, c2, c3 are in UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        return columns[len(uniqbase):].split("_x_")

    if engine_name not in ["mysql", "sqlite", "postgresql"]:
        return

    m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
    if not m:
        return
    columns = m.group(1)

    if engine_name == "sqlite":
        columns = columns.strip().split(", ")
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)
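
In other words, assuming constraint names follow the "uniq_" convention
documented in the helper, the extraction reduces to simple string surgery:

    "uniq_name_x_host"[len("uniq_"):].split("_x_")   # -> ['name', 'host']
    # postgres default names such as "users_c1_key" fall back to ['c1']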


# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
#                     'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}


def _raise_if_deadlock_error(operational_error, engine_name):
    """
    Raise a DBDeadlock exception if the OperationalError contains a
    deadlock condition.
    """
    # Use a local name that does not shadow the global 're' module.
    deadlock_re = _DEADLOCK_RE_DB.get(engine_name)
    if deadlock_re is None:
        return
    m = deadlock_re.match(operational_error.message)
    if not m:
        return
    raise exception.DBDeadlock(operational_error)


def _wrap_db_error(f):
    def _wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        # note(boris-42): We should catch unique constraint violation and
        # wrap it by our own DBDuplicateEntry exception. Unique constraint
        # violation is wrapped by IntegrityError.
        except sqla_exc.OperationalError as e:
            _raise_if_deadlock_error(e, get_engine().name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also in some tables (for example
            # instance_types) there are more than one unique constraint. This
            # means we should get names of columns, which values violate
            # unique constraint, from error message.
            _raise_if_duplicate_entry_error(e, get_engine().name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_('DB exception wrapped.'))
            raise exception.DBError(e)
    _wrap.__name__ = f.__name__
    return _wrap


def get_engine(sqlite_fk=False):
    """Return a SQLAlchemy engine."""
    global _ENGINE
    if _ENGINE is None:
        _ENGINE = create_engine(CONF.database.connection,
                                sqlite_fk=sqlite_fk)
    return _ENGINE


def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode."""
    dbapi_conn.execute("PRAGMA synchronous = OFF")


def _add_regexp_listener(dbapi_con, con_record):
    """Add REGEXP function to sqlite connections."""

    def regexp(expr, item):
        reg = re.compile(expr)
        return reg.search(six.text_type(item)) is not None
    dbapi_con.create_function('regexp', 2, regexp)


def _greenthread_yield(dbapi_con, con_record):
    """
    Ensure other greenthreads get a chance to execute by forcing a context
    switch. With common database backends (eg MySQLdb and sqlite), there is
    no implicit yield caused by network I/O since they are implemented by
    C libraries that eventlet cannot monkey patch.
    """
    greenthread.sleep(0)


def _ping_listener(dbapi_conn, connection_rec, connection_proxy):
    """
    Ensures that MySQL connections checked out of the
    pool are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    try:
        dbapi_conn.cursor().execute('select 1')
    except dbapi_conn.OperationalError as ex:
        if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
            LOG.warn(_('MySQL server has gone away: %s'), ex)
            raise sqla_exc.DisconnectionError("Database server went away")
        else:
            raise


def _is_db_connection_error(args):
    """Return True if error in connecting to db."""
    # NOTE(adam_g): This is currently MySQL specific and needs to be extended
    # to support Postgres and others.
    conn_err_codes = ('2002', '2003', '2006')
    for err_code in conn_err_codes:
        if args.find(err_code) != -1:
            return True
    return False


def create_engine(sql_connection, sqlite_fk=False):
    """Return a new SQLAlchemy engine."""
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": CONF.database.idle_timeout,
        "echo": False,
        'convert_unicode': True,
    }

    # Map our SQL debug level to SQLAlchemy's options
    if CONF.database.connection_debug >= 100:
        engine_args['echo'] = 'debug'
    elif CONF.database.connection_debug >= 50:
        engine_args['echo'] = True

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if CONF.database.connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        engine_args['pool_size'] = CONF.database.max_pool_size
        if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)

    if 'mysql' in connection_dict.drivername:
        sqlalchemy.event.listen(engine, 'checkout', _ping_listener)
    elif 'sqlite' in connection_dict.drivername:
        if not CONF.sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if (CONF.database.connection_trace and
            engine.dialect.dbapi.__name__ == 'MySQLdb'):
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = CONF.database.max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warn(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(CONF.database.retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine


class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""
    def soft_delete(self, synchronize_session='evaluate'):
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)
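
A hypothetical usage sketch (Host is an illustrative model, not defined in
this module): each matched row gets deleted=<its own id> plus a deleted_at
timestamp, which keeps unique constraints that include 'deleted' usable.

    session = get_session()
    session.query(Host).filter_by(hostname='controller-0').soft_delete()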


class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)


def get_thread_id():
    thread_id = id(eventlet.greenthread.getcurrent())

    return thread_id


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    global _MAKER

    # Computed up front so the log below stays valid even when the scoped
    # session already exists.
    scopefunc = get_thread_id()
    if _MAKER is None:
        _MAKER = sqlalchemy.orm.scoped_session(
            sqlalchemy.orm.sessionmaker(bind=engine,
                                        class_=Session,
                                        autocommit=autocommit,
                                        expire_on_commit=expire_on_commit,
                                        query_cls=Query),
            scopefunc=get_thread_id)

    LOG.info("get_maker greenthread current_thread=%s session=%s "
             "autocommit=%s, scopefunc=%s" %
             (threading.current_thread(), _MAKER, autocommit, scopefunc))
    return _MAKER


def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries by patching
    MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        for file, line, method, function in traceback.extract_stack():
            # exclude various common things from trace
            if file.endswith('session.py') and method == '_do_query':
                continue
            if file.endswith('api.py') and method == 'wrapper':
                continue
            if file.endswith('utils.py') and method == '_inner':
                continue
            if file.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if file.endswith('db/api.py'):
                continue
            # only trace inside sysinv
            index = file.rfind('sysinv')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (file[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
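
With the patch active, every statement sent through MySQLdb carries its call
site as a trailing SQL comment, roughly like this (table, path and line
number invented for the illustration):

    SELECT * FROM i_host /* File:sysinv/db/sqlalchemy/api.py:210
        Method:ihost_get_list() Line:return query.all() */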


@ -1,143 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of paginate query."""

import sqlalchemy

from oslo_log import log as logging
from sysinv._i18n import _

LOG = logging.getLogger(__name__)


class InvalidSortKey(Exception):
    message = _("Sort key supplied was not valid.")


# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """

    if 'id' not in sort_keys:
        # TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
        LOG.warn(_('id not in sort_keys; is sort_keys unique?'))

    assert(not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert(len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        sort_dir_func = {
            'asc': sqlalchemy.asc,
            'desc': sqlalchemy.desc,
        }[current_sort_dir]

        try:
            sort_key_attr = getattr(model, current_sort_key)
        except AttributeError:
            LOG.error('%s is not a valid sort key' % (current_sort_key))
            raise InvalidSortKey()
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(0, len(sort_keys)):
            crit_attrs = []
            for j in range(0, i):
                model_attr = getattr(model, sort_keys[j])
                crit_attrs.append((model_attr == marker_values[j]))

            model_attr = getattr(model, sort_keys[i])
            if sort_dirs[i] == 'desc':
                crit_attrs.append((model_attr < marker_values[i]))
            elif sort_dirs[i] == 'asc':
                crit_attrs.append((model_attr > marker_values[i]))
            else:
                raise ValueError(_("Unknown sort direction, "
                                   "must be 'desc' or 'asc'"))

            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
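
For example, with sort_keys=['name', 'id'], both ascending, and a marker row
whose values are ('n5', 17), the filter assembled above is equivalent to:

    (name > 'n5') OR (name = 'n5' AND id > 17)

which resumes the lexicographical ordering immediately after the marker row.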


def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.
    """
    metadata = sqlalchemy.MetaData()
    metadata.bind = engine
    return sqlalchemy.Table(name, metadata, autoload=True)

@ -28,7 +28,8 @@ PLATFORM_NETWORK_TYPES = [constants.NETWORK_TYPE_PXEBOOT,
                          constants.NETWORK_TYPE_MGMT,
                          constants.NETWORK_TYPE_CLUSTER_HOST,
                          constants.NETWORK_TYPE_OAM,
                          constants.NETWORK_TYPE_IRONIC]
                          constants.NETWORK_TYPE_IRONIC,
                          constants.NETWORK_TYPE_STORAGE]

DATA_NETWORK_TYPES = [constants.NETWORK_TYPE_DATA]


@ -291,6 +292,19 @@ class InterfacePuppet(base.BasePuppet):
        except exception.AddressNotFoundByName:
            pass

        try:
            storage_address = self._get_address_by_name(
                constants.CONTROLLER_HOSTNAME, constants.NETWORK_TYPE_STORAGE)

            storage_floating_ip = (str(storage_address.address) + '/' +
                                   str(storage_address.prefix))

            floating_ips.update({
                constants.NETWORK_TYPE_STORAGE: storage_floating_ip,
            })
        except exception.AddressNotFoundByName:
            pass

        return floating_ips
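
When every lookup succeeds, the resulting map pairs each network type with
the controller floating address in CIDR form, along the lines of (addresses
purely illustrative):

    {'mgmt': '192.168.204.2/24', 'storage': '10.10.20.2/24', ...}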

    def _get_datanetworks(self, host):

@ -646,6 +660,13 @@ def get_interface_address_method(context, iface, network_id=None):
        # natively supported in vswitch or need to be shared with the kernel
        # because of a platform VLAN should be left as manual config
        return MANUAL_METHOD
    elif (iface.ifclass == constants.INTERFACE_CLASS_PLATFORM and
            networktype is None and
            (iface.ipv4_mode == constants.IPV4_STATIC or
             iface.ipv6_mode == constants.IPV6_STATIC)):
        # Allow platform-class interface with ipv4 mode set to static to
        # have static ip address
        return STATIC_METHOD
    elif not iface.ifclass or iface.ifclass == constants.INTERFACE_CLASS_NONE \
            or not networktype:
        # Interfaces that are configured purely as a dependency from other

@ -661,6 +682,8 @@ def get_interface_address_method(context, iface, network_id=None):
        return STATIC_METHOD
    elif networktype == constants.NETWORK_TYPE_CLUSTER_HOST:
        return STATIC_METHOD
    elif networktype == constants.NETWORK_TYPE_STORAGE:
        return STATIC_METHOD
    elif networktype == constants.NETWORK_TYPE_PXEBOOT:
        # All pxeboot interfaces that exist on non-controller nodes are set
        # to manual as they are not needed/used once the install is done.
@ -1004,14 +1027,13 @@ def get_sriov_config(context, iface):
    if not port:
        return {}

    vf_addr_list = ''
    vf_addrs = port.get('sriov_vfs_pci_address', None)
    if not vf_addrs:
        return {}

    vf_addr_list = vf_addrs.split(',')
    vf_addr_list = interface.get_sriov_interface_vf_addrs(
        context, iface, vf_addr_list)
    vf_addr_list = ",".join(vf_addr_list)
    if vf_addrs:
        vf_addr_list = vf_addrs.split(',')
        vf_addr_list = interface.get_sriov_interface_vf_addrs(
            context, iface, vf_addr_list)
        vf_addr_list = ",".join(vf_addr_list)

    if vf_driver:
        if constants.SRIOV_DRIVER_TYPE_VFIO in vf_driver:

@ -1027,10 +1049,20 @@ def get_sriov_config(context, iface):

    # Format the vf addresses as quoted strings in order to prevent
    # puppet from treating the address as a time/date value
    vf_addrs = [quoted_str(addr.strip()) for addr in vf_addr_list.split(",")]
    vf_addrs = [quoted_str(addr.strip())
                for addr in vf_addr_list.split(",") if addr]

    # Include the desired number of VFs if the device supports SR-IOV
    # config via sysfs and is not a sub-interface
    num_vfs = None
    if (not is_a_mellanox_cx3_device(context, iface)
            and iface['iftype'] != constants.INTERFACE_TYPE_VF):
        num_vfs = iface['sriov_numvfs']

    config = {
        'ifname': iface['ifname'],
        'pf_addr': quoted_str(port['pciaddr'].strip()),
        'num_vfs': num_vfs,
        'vf_driver': vf_driver,
        'vf_addrs': vf_addrs
    }
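
The new "if addr" guard matters because of how str.split behaves on empty
input; without it an empty VF list would still emit one quoted empty string:

    ''.split(',')      # -> ['']  (one empty entry, not an empty list)
    'a,b'.split(',')   # -> ['a', 'b']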


@ -46,8 +46,6 @@ class KeystonePuppet(openstack.OpenstackBasePuppet):

        return {
            'keystone::db::postgresql::user': dbuser,
            'keystone::cache_enabled': True,
            'keystone::cache_backend': 'dogpile.cache.memcached',

            'platform::client::params::admin_username': admin_username,

@ -153,19 +151,27 @@ class KeystonePuppet(openstack.OpenstackBasePuppet):
        return config

    def get_host_config(self, host):
        # The valid format for IPv6 addresses is: inet6:[<ip_v6>]:port
        # Although, for IPv4, the "inet" part is not mandatory, we
        # specify it anyway, for consistency purposes.
        if self._get_address_by_name(
                constants.CONTROLLER_PLATFORM_NFS,
                constants.NETWORK_TYPE_MGMT).family == constants.IPV6_FAMILY:
            argument = "url:inet6:[%s]:11211" % host.mgmt_ip
        else:
            argument = "url:inet:%s:11211" % host.mgmt_ip
        config = {}
        # The use of caching on subclouds is not supported as the syncing of
        # fernet keys to the subcloud results in stale cache entries.
        if self._distributed_cloud_role() != \
                constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD:
            # The valid format for IPv6 addresses is: inet6:[<ip_v6>]:port
            # Although, for IPv4, the "inet" part is not mandatory, we
            # specify it anyway, for consistency purposes.
            if self._get_address_by_name(
                    constants.CONTROLLER_PLATFORM_NFS,
                    constants.NETWORK_TYPE_MGMT).family == constants.IPV6_FAMILY:
                argument = "url:inet6:[%s]:11211" % host.mgmt_ip
            else:
                argument = "url:inet:%s:11211" % host.mgmt_ip

            config.update({
                'keystone::cache_enabled': True,
                'keystone::cache_backend': 'dogpile.cache.memcached',
                'keystone::cache_backend_argument': argument
            })

        config = {
            'keystone::cache_backend_argument': argument
        }
        return config
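
For an IPv4 management address the memcached backend argument comes out as,
for example, "url:inet:192.168.204.2:11211", while an IPv6 deployment yields
"url:inet6:[fd00::2]:11211" (both addresses illustrative).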

    def _get_service_parameter_config(self):

@ -8,7 +8,9 @@ from __future__ import absolute_import
from eventlet.green import subprocess
import json
import netaddr
import os
import re
import tempfile

from oslo_log import log as logging
from sysinv.common import constants

@ -83,18 +85,28 @@ class KubernetesPuppet(base.BasePuppet):
            if host.personality == constants.CONTROLLER:
                # Upload the certificates used during kubeadm join
                # The cert key will be printed in the last line of the output
                # We will create a temp file with the kubeadm config
                # We need this because the kubeadm config could have changed
                # since bootstrap. Reading the kubeadm config each time
                # it is needed ensures we are not using stale data
                fd, temp_kubeadm_config_view = tempfile.mkstemp(dir='/tmp', suffix='.yaml')
                with os.fdopen(fd, 'w') as f:
                    cmd = ['kubeadm', 'config', 'view']
                    subprocess.check_call(cmd, stdout=f)
                cmd = ['kubeadm', 'init', 'phase', 'upload-certs', '--upload-certs', '--config',
                       '/etc/kubernetes/kubeadm.yaml']
                       temp_kubeadm_config_view]
                cmd_output = subprocess.check_output(cmd)
                cert_key = cmd_output.strip().split('\n')[-1]
                join_cmd_additions = " --control-plane --certificate-key %s" % cert_key
                os.unlink(temp_kubeadm_config_view)

            cmd = ['kubeadm', 'token', 'create', '--print-join-command',
                   '--description', 'Bootstrap token for %s' % host.hostname]
            join_cmd = subprocess.check_output(cmd)
            join_cmd_additions += " --cri-socket /var/run/containerd/containerd.sock"
            join_cmd = join_cmd.strip() + join_cmd_additions
        except subprocess.CalledProcessError:
        except Exception:
            LOG.exception("Exception generating bootstrap token")
            raise exception.SysinvException('Failed to generate bootstrap token')

        config.update({'platform::kubernetes::params::join_cmd': join_cmd})
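
The assembled hiera value ends up looking roughly like the following (server
address, token, hash and key all invented for the sketch):

    kubeadm join 192.168.206.1:6443 --token <tok> \
        --discovery-token-ca-cert-hash sha256:<hash> \
        --control-plane --certificate-key <key> \
        --cri-socket /var/run/containerd/containerd.sock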

@ -339,7 +351,10 @@ class KubernetesPuppet(base.BasePuppet):
        interfaces = self._get_network_interfaces_by_class(ifclass)
        for iface in interfaces:

            port = interface.get_sriov_interface_port(self.context, iface)
            if ifclass == constants.INTERFACE_CLASS_PCI_SRIOV:
                port = interface.get_sriov_interface_port(self.context, iface)
            else:
                port = interface.get_interface_port(self.context, iface)
            if not port:
                continue

@ -23,6 +23,7 @@ class NetworkingPuppet(base.BasePuppet):
        config.update(self._get_oam_network_config())
        config.update(self._get_cluster_network_config())
        config.update(self._get_ironic_network_config())
        config.update(self._get_storage_network_config())
        return config

    def get_host_config(self, host):

@ -32,6 +33,7 @@ class NetworkingPuppet(base.BasePuppet):
        config.update(self._get_cluster_interface_config())
        config.update(self._get_ironic_interface_config())
        config.update(self._get_ptp_interface_config())
        config.update(self._get_storage_interface_config())
        if host.personality == constants.CONTROLLER:
            config.update(self._get_oam_interface_config())
        return config

@ -90,6 +92,11 @@ class NetworkingPuppet(base.BasePuppet):
        config = self._get_network_config(networktype)
        return config

    def _get_storage_network_config(self):
        networktype = constants.NETWORK_TYPE_STORAGE
        config = self._get_network_config(networktype)
        return config

    def _get_network_config(self, networktype):
        try:
            network = self.dbapi.network_get_by_type(networktype)

@ -175,6 +182,9 @@ class NetworkingPuppet(base.BasePuppet):
    def _get_ironic_interface_config(self):
        return self._get_interface_config(constants.NETWORK_TYPE_IRONIC)

    def _get_storage_interface_config(self):
        return self._get_interface_config(constants.NETWORK_TYPE_STORAGE)

    def _get_ptp_interface_config(self):
        config = {}
        ptp_devices = {

@ -431,16 +431,61 @@ class PlatformPuppet(base.BasePuppet):
            ptp_enabled = True
        else:
            ptp_enabled = False
        return {'platform::ptp::enabled': ptp_enabled}

        ptp_config = {
            'tx_timestamp_timeout': '20',
            'summary_interval': '6',
            'clock_servo': 'linreg',
            'delay_mechanism': ptp.mechanism.upper(),
            'time_stamping': ptp.mode.lower()
        }

        if ptp.mode.lower() == 'hardware':
            ptp_config.update({'boundary_clock_jbod': '1'})

        ptp_service_params = self.dbapi.service_parameter_get_all(
            service=constants.SERVICE_TYPE_PTP,
            section=constants.SERVICE_PARAM_SECTION_PTP_GLOBAL)

        # Merge options specified in service parameters with ptp database
        # values and defaults
        for param in ptp_service_params:
            ptp_config.update({param.name: param.value})

        transport = constants.PTP_TRANSPORT_L2

        specified_transport = ptp_config.get('network_transport')
        if specified_transport:
            # Currently we can only set the network transport globally.
            # Setting the transport flag to udp will force puppet to apply
            # the correct UDP family to each interface
            if specified_transport != constants.PTP_NETWORK_TRANSPORT_IEEE_802_3:
                transport = constants.PTP_TRANSPORT_UDP
        else:
            ptp_config.update({'network_transport': constants.PTP_NETWORK_TRANSPORT_IEEE_802_3})
            transport = ptp.transport

        # Generate ptp4l global options
        ptp4l_options = []
        for key, value in ptp_config.items():
            ptp4l_options.append({'name': key, 'value': value})

        # Get the options for the phc2sys system
        phc2sys_config = constants.PTP_PHC2SYS_DEFAULTS
        phc2sys_service_params = self.dbapi.service_parameter_get_all(
            service=constants.SERVICE_TYPE_PTP,
            section=constants.SERVICE_PARAM_SECTION_PTP_PHC2SYS)

        for param in phc2sys_service_params:
            phc2sys_config.update({param.name: param.value})

        phc2sys_options = ''
        for key, value in phc2sys_config.items():
            phc2sys_options += '-' + constants.PTP_PHC2SYS_OPTIONS_MAP[key] + ' ' + str(value) + ' '

        return {
            'platform::ptp::enabled':
                ptp_enabled,
            'platform::ptp::mode':
                ptp.mode,
            'platform::ptp::transport':
                ptp.transport,
            'platform::ptp::mechanism':
                ptp.mechanism,
            'platform::ptp::enabled': ptp_enabled,
            'platform::ptp::transport': transport,
            'platform::ptp::ptp4l_options': ptp4l_options,
            'platform::ptp::phc2sys_options': phc2sys_options
        }
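
A hypothetical walk-through of the phc2sys string assembly: if the defaults
were {'domainNumber': '0'} and the options map translated 'domainNumber' to
'n', the loop would emit "-n 0 ", i.e. one "-<flag> <value> " pair per entry.
The real keys live in constants.PTP_PHC2SYS_DEFAULTS and
constants.PTP_PHC2SYS_OPTIONS_MAP.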

    def _get_host_sysctl_config(self, host):

@ -98,6 +98,25 @@ class FunctionalTest(base.TestCase):
            print('GOT:%s' % response)
        return response

    def post_with_files(self, path, params, upload_files, expect_errors=False,
                        headers=None, method="post", extra_environ=None,
                        status=None, path_prefix=PATH_PREFIX):
        full_path = path_prefix + path
        if DEBUG_PRINTING:
            print('%s: %s %s' % (method.upper(), full_path, params))
        response = getattr(self.app, "%s" % method)(
            str(full_path),
            params,
            upload_files=upload_files,
            headers=headers,
            status=status,
            extra_environ=extra_environ,
            expect_errors=expect_errors
        )
        if DEBUG_PRINTING:
            print('GOT:%s' % response)
        return response
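
Typical use, as exercised by the certificate install tests later in this
change:

    files = [('file', certfile)]
    response = self.post_with_files('/certificate/certificate_install',
                                    {'mode': 'ssl_ca'},
                                    upload_files=files,
                                    headers=self.API_HEADERS)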

    def put_json(self, *args, **kwargs):
        kwargs['method'] = 'put'
        return self.post_json(*args, **kwargs)

@ -117,12 +136,13 @@ class FunctionalTest(base.TestCase):
        return self.post_json(path, expect_errors=expect_errors,
                              headers=headers, **newargs)

    def patch_dict(self, path, data, expect_errors=False):
    def patch_dict(self, path, data, expect_errors=False, headers=None):
        params = []
        for key, value in data.items():
            pathkey = '/' + key
            params.append({'op': 'replace', 'path': pathkey, 'value': value})
        return self.post_json(path, expect_errors=expect_errors, params=params, method='patch')
        return self.post_json(path, expect_errors=expect_errors, params=params,
                              method='patch', headers=headers)
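
For example, patch_dict('/iinterfaces/<uuid>', {'ipv4_mode': 'static'})
issues a JSON patch body of:

    [{'op': 'replace', 'path': '/ipv4_mode', 'value': 'static'}]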

    def delete(self, path, expect_errors=False, headers=None,
               extra_environ=None, status=None, path_prefix=PATH_PREFIX):

@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDfTCCAmWgAwIBAgIJAKW/fs28rzSQMA0GCSqGSIb3DQEBCwUAMFUxCzAJBgNV
BAYTAkNBMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxETAPBgNVBAMMCHRlc3RfY2ExMB4XDTIwMDMxODEzNDcyNloX
DTIzMDEwNjEzNDcyNlowVTELMAkGA1UEBhMCQ0ExFTATBgNVBAcMDERlZmF1bHQg
Q2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDERMA8GA1UEAwwIdGVz
dF9jYTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwIvCInUpgBlwz
+ZPr++dsL5UygKQjUwWkjp4NDxs2vYmCuhwgeLoOYwf9TCAIXD+9iR3rN+lUzqWH
NvJAfeW6q0cBnFf6NSI4gW0JVvJOUY2d0JJwLsQNyirI8ssxZcuoFr7iKb1rxnPM
Suyh1Ji+GeC8CPLnNdWZGvnNtPNOCpdK72l2uWPcBLSvU+/zGEkhw6yzoQhZBMAX
OXC4DIrAfcS7MehYpmLnmLdEn0MKLe9fssjuHSALos8FEszfU2Q5sdOO5HxV3+Ua
JyY4jnxuP5eq/VmzPnfjNJqYOTpX5ZZGr91LPvERaPybMwaGLHV/ZdrkAZntTWoM
F4JI2eb1AgMBAAGjUDBOMB0GA1UdDgQWBBTYewS81nc74bgd82r0OULsaCyvDTAf
BgNVHSMEGDAWgBTYewS81nc74bgd82r0OULsaCyvDTAMBgNVHRMEBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4IBAQCpbrpcKCAqgjUHDm9DbG9Y3NUED/gajE8+mJFvcjEC
CJlLISDoUrRpE/vqlVpoj8mPmMaSVd5doX6G6PSnA2hNnjLkts9OQGGbGpXYtkBN
WD09EnrJbeEtofc/eSgTO17ePirTBy2LJ0nTuTUlN2wkAhzOtrYI2fEw4ZqqLBkM
eOpUE3+A92/L4iqhCxyxv1DxvYNDRq7SvtS/TxkXRcsyPDrUR5/sOhn6Rcb0J9I8
pA37oiqiBRUnDoE2+IxRiCyC5/FYQdCIR8Y/2g8xpgY/trYFl5IDJbge+6jaCfMl
5NgkuCPTKCtPtfLKAWUfXV/FM58nyDYKuyreCr7lAnc0
-----END CERTIFICATE-----
@ -0,0 +1,42 @@
-----BEGIN CERTIFICATE-----
MIIDfTCCAmWgAwIBAgIJAKW/fs28rzSQMA0GCSqGSIb3DQEBCwUAMFUxCzAJBgNV
BAYTAkNBMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxETAPBgNVBAMMCHRlc3RfY2ExMB4XDTIwMDMxODEzNDcyNloX
DTIzMDEwNjEzNDcyNlowVTELMAkGA1UEBhMCQ0ExFTATBgNVBAcMDERlZmF1bHQg
Q2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDERMA8GA1UEAwwIdGVz
dF9jYTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwIvCInUpgBlwz
+ZPr++dsL5UygKQjUwWkjp4NDxs2vYmCuhwgeLoOYwf9TCAIXD+9iR3rN+lUzqWH
NvJAfeW6q0cBnFf6NSI4gW0JVvJOUY2d0JJwLsQNyirI8ssxZcuoFr7iKb1rxnPM
Suyh1Ji+GeC8CPLnNdWZGvnNtPNOCpdK72l2uWPcBLSvU+/zGEkhw6yzoQhZBMAX
OXC4DIrAfcS7MehYpmLnmLdEn0MKLe9fssjuHSALos8FEszfU2Q5sdOO5HxV3+Ua
JyY4jnxuP5eq/VmzPnfjNJqYOTpX5ZZGr91LPvERaPybMwaGLHV/ZdrkAZntTWoM
F4JI2eb1AgMBAAGjUDBOMB0GA1UdDgQWBBTYewS81nc74bgd82r0OULsaCyvDTAf
BgNVHSMEGDAWgBTYewS81nc74bgd82r0OULsaCyvDTAMBgNVHRMEBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4IBAQCpbrpcKCAqgjUHDm9DbG9Y3NUED/gajE8+mJFvcjEC
CJlLISDoUrRpE/vqlVpoj8mPmMaSVd5doX6G6PSnA2hNnjLkts9OQGGbGpXYtkBN
WD09EnrJbeEtofc/eSgTO17ePirTBy2LJ0nTuTUlN2wkAhzOtrYI2fEw4ZqqLBkM
eOpUE3+A92/L4iqhCxyxv1DxvYNDRq7SvtS/TxkXRcsyPDrUR5/sOhn6Rcb0J9I8
pA37oiqiBRUnDoE2+IxRiCyC5/FYQdCIR8Y/2g8xpgY/trYFl5IDJbge+6jaCfMl
5NgkuCPTKCtPtfLKAWUfXV/FM58nyDYKuyreCr7lAnc0
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDfTCCAmWgAwIBAgIJAJKcXHBwS9zSMA0GCSqGSIb3DQEBCwUAMFUxCzAJBgNV
BAYTAkNBMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg
Q29tcGFueSBMdGQxETAPBgNVBAMMCHRlc3RfY2EyMB4XDTIwMDMxODIxMDQzMVoX
DTIzMDEwNjIxMDQzMVowVTELMAkGA1UEBhMCQ0ExFTATBgNVBAcMDERlZmF1bHQg
Q2l0eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDERMA8GA1UEAwwIdGVz
dF9jYTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKJSwLNnNf4djp
zep+cGn35u/AY7X7D/g1bETX7evDq9EQjSntZzjop/r6MxM57dCRRVSe9M8SsqUX
UBtUTe2sg30lVJqMP7WRT8p06ie/e6prHHUjcIFUd4xm8AmWORTXr0FsXr3mI2VJ
lW9ZDuF7tuuBuK67IAdA2T2snUjG+V5k0aW70JLisu2Mnhgn1o4+0UGOIc3UDQ/q
WfMsGN/rTZV/XbVyZJoi9iWKnhwpGLlgA9ouVr9WK1Co/ZMw05lrDjzLmG6niyBW
LUEET0ASnuaAV12EFpEvWIq9xk9wssBgf87WSF0Z/vk1++aKjF6lBfMKEhbz8hof
yFF9lQ07AgMBAAGjUDBOMB0GA1UdDgQWBBQSjySIXiA5Gdjhbl/EhpWyb12ErjAf
BgNVHSMEGDAWgBQSjySIXiA5Gdjhbl/EhpWyb12ErjAMBgNVHRMEBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4IBAQBzYgj4QUkspL65Hf1k47l9ptTPGm/XqqKBEPe2I9o6
9v0Ogfy3HwWgyUpN3cww6SN9xIPZAaBv+mbSDa/mw9woewJ8+gUBIM98rzJmfF9x
UUzuEBRuTT/K36QzblcgC+1RbLeLOQJ+TvTfnTFBh8+UF+GgUJAIKsGEOX7Ww5cw
OmfKDu56gNLqdlWT7tXKpc3m0DlADV0HrmeOoUoBRi0PdB5FfSXGnNc8vrEicpZO
Yo6E4ZCB0dRJhAgl4sVFNUw5xK1eXQPjkHNkd26zGNKb0u2G8XOxfbSXTTcU1gqb
Bl93WuquFHeLMPeX7w1+FPvP9kXA1ibBfrfHSyp65dXL
-----END CERTIFICATE-----
@ -8,6 +8,7 @@
 Tests for the API / address / methods.
"""

import mock
import netaddr
from six.moves import http_client

@ -77,20 +78,24 @@ class AddressTestCase(base.FunctionalTest, dbbase.BaseHostTestCase):
            self.assertNotIn(field, api_object)

    def get_post_object(self, name='test_address', ip_address='127.0.0.1',
                        prefix=8, address_pool_id=None, interface_id=None):
                        prefix=8, address_pool_id=None, interface_uuid=None):
        addr = netaddr.IPAddress(ip_address)
        addr_db = dbutils.get_test_address(
            address=str(addr),
            prefix=prefix,
            name=name,
            address_pool_id=address_pool_id,
            interface_id=interface_id,
        )

        if self.oam_subnet.version == 6:
            addr_db["enable_dad"] = True

        # pool_uuid in api corresponds to address_pool_id in db
        addr_db['pool_uuid'] = addr_db.pop('address_pool_id')
        addr_db['interface_uuid'] = addr_db.pop('interface_id')
        addr_db.pop('family')
        addr_db['interface_uuid'] = interface_uuid

        del addr_db['family']
        del addr_db['interface_id']

        return addr_db

@ -99,15 +104,16 @@ class TestPostMixin(AddressTestCase):

    def setUp(self):
        super(TestPostMixin, self).setUp()
        self.worker = self._create_test_host(constants.WORKER,
                                             administrative=constants.ADMIN_LOCKED)

    def _test_create_address_success(self, name, ip_address, prefix,
                                     address_pool_id, interface_id):
                                     address_pool_id, interface_uuid):
        # Test creation of object

        addr_db = self.get_post_object(name=name, ip_address=ip_address,
                                       prefix=prefix,
                                       address_pool_id=address_pool_id,
                                       interface_id=interface_id)
                                       interface_uuid=interface_uuid)
        response = self.post_json(self.API_PREFIX,
                                  addr_db,
                                  headers=self.API_HEADERS)

@ -121,14 +127,14 @@ class TestPostMixin(AddressTestCase):
                         addr_db[self.COMMON_FIELD])

    def _test_create_address_fail(self, name, ip_address, prefix,
                                  address_pool_id, interface_id,
                                  status_code, error_message):
                                  address_pool_id, status_code,
                                  error_message, interface_uuid=None):
        # Test creation of object

        addr_db = self.get_post_object(name=name, ip_address=ip_address,
                                       prefix=prefix,
                                       address_pool_id=address_pool_id,
                                       interface_id=interface_id)
                                       interface_uuid=interface_uuid)
        response = self.post_json(self.API_PREFIX,
                                  addr_db,
                                  headers=self.API_HEADERS,

@ -143,8 +149,7 @@ class TestPostMixin(AddressTestCase):
        self._test_create_address_success(
            "fake-address",
            str(self.oam_subnet[25]), self.oam_subnet.prefixlen,
            address_pool_id=self.address_pools[2].uuid,
            interface_id=None,
            address_pool_id=self.address_pools[2].uuid, interface_uuid=None
        )

    def test_create_address_wrong_address_pool(self):

@ -152,7 +157,6 @@ class TestPostMixin(AddressTestCase):
            "fake-address",
            str(self.oam_subnet[25]), self.oam_subnet.prefixlen,
            address_pool_id=self.address_pools[1].uuid,
            interface_id=None,
            status_code=http_client.CONFLICT,
            error_message="does not match pool network",
        )

@ -162,7 +166,6 @@ class TestPostMixin(AddressTestCase):
            "fake-address",
            str(self.oam_subnet[25]), self.oam_subnet.prefixlen - 1,
            address_pool_id=self.address_pools[2].uuid,
            interface_id=None,
            status_code=http_client.CONFLICT,
            error_message="does not match pool network",
        )

@ -174,7 +177,6 @@ class TestPostMixin(AddressTestCase):
            "fake-address",
            str(self.oam_subnet[25]), 0,
            address_pool_id=self.address_pools[2].uuid,
            interface_id=None,
            status_code=http_client.INTERNAL_SERVER_ERROR,
            error_message=error_message,
        )

@ -189,7 +191,6 @@ class TestPostMixin(AddressTestCase):
            "fake-address",
            zero_address, self.oam_subnet.prefixlen,
            address_pool_id=self.address_pools[2].uuid,
            interface_id=None,
            status_code=http_client.INTERNAL_SERVER_ERROR,
            error_message=error_message,
        )

@ -199,7 +200,6 @@ class TestPostMixin(AddressTestCase):
            "fake_address",
            str(self.oam_subnet[25]), self.oam_subnet.prefixlen,
            address_pool_id=self.address_pools[2].uuid,
            interface_id=None,
            status_code=http_client.BAD_REQUEST,
            error_message="Please configure valid hostname.",
        )

@ -209,11 +209,36 @@ class TestPostMixin(AddressTestCase):
            "fake-address",
            str(self.multicast_subnet[1]), self.oam_subnet.prefixlen,
            address_pool_id=self.address_pools[2].uuid,
            interface_id=None,
            status_code=http_client.INTERNAL_SERVER_ERROR,
            error_message="Address must be a unicast address",
        )

    def test_create_address_platform_interface(self):
        if self.oam_subnet.version == 4:
            ipv4_mode, ipv6_mode = (constants.IPV4_STATIC, constants.IPV6_DISABLED)
        else:
            ipv4_mode, ipv6_mode = (constants.IPV4_DISABLED, constants.IPV6_STATIC)

        # Create platform interface, patch to make static
        interface = dbutils.create_test_interface(
            ifname="platformip",
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            forihostid=self.worker.id,
            ihost_uuid=self.worker.uuid)
        response = self.patch_dict_json(
            '%s/%s' % (self.IFACE_PREFIX, interface['uuid']),
            ipv4_mode=ipv4_mode, ipv6_mode=ipv6_mode)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(response.status_code, http_client.OK)
        self.assertEqual(response.json['ifclass'], 'platform')
        self.assertEqual(response.json['ipv4_mode'], ipv4_mode)
        self.assertEqual(response.json['ipv6_mode'], ipv6_mode)

        # Verify an address associated with the interface can be created
        self._test_create_address_success('platformtest',
                                          str(self.oam_subnet[25]), self.oam_subnet.prefixlen,
                                          None, interface.uuid)


class TestDelete(AddressTestCase):
    """ Tests deletion.

@ -224,6 +249,8 @@ class TestDelete(AddressTestCase):

    def setUp(self):
        super(TestDelete, self).setUp()
        self.worker = self._create_test_host(constants.WORKER,
                                             administrative=constants.ADMIN_LOCKED)

    def test_delete(self):
        # Delete the API object

@ -235,11 +262,117 @@ class TestDelete(AddressTestCase):
        # Verify the expected API response for the delete
        self.assertEqual(response.status_code, http_client.NO_CONTENT)

        # TODO: Add unit tests to verify deletion is rejected as expected by
        # _check_orphaned_routes, _check_host_state, and _check_from_pool.
        #
        # Currently blocked by bug in dbapi preventing testcase setup:
        # https://bugs.launchpad.net/starlingx/+bug/1861131
    def test_delete_address_with_interface(self):
        interface = dbutils.create_test_interface(
            ifname="test0",
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            forihostid=self.worker.id,
            ihost_uuid=self.worker.uuid)

        address = dbutils.create_test_address(
            interface_id=interface.id,
            name="enptest01",
            family=self.oam_subnet.version,
            address=str(self.oam_subnet[25]),
            prefix=self.oam_subnet.prefixlen)
        self.assertEqual(address["interface_id"], interface.id)

        response = self.delete(self.get_single_url(address.uuid),
                               headers=self.API_HEADERS)
        self.assertEqual(response.status_code, http_client.NO_CONTENT)

    def test_orphaned_routes(self):
        interface = dbutils.create_test_interface(
            ifname="test0",
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            forihostid=self.worker.id,
            ihost_uuid=self.worker.uuid)

        address = dbutils.create_test_address(
            interface_id=interface.id,
            name="enptest01",
            family=self.oam_subnet.version,
            address=str(self.oam_subnet[25]),
            prefix=self.oam_subnet.prefixlen)
        self.assertEqual(address["interface_id"], interface.id)

        route = dbutils.create_test_route(
            interface_id=interface.id,
            family=4,
            network='10.10.10.0',
            prefix=24,
            gateway=str(self.oam_subnet[1]),
        )
        self.assertEqual(route['gateway'], str(self.oam_subnet[1]))

        response = self.delete(self.get_single_url(address.uuid),
                               headers=self.API_HEADERS,
                               expect_errors=True)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.CONFLICT)
        self.assertIn(
            "Address %s is in use by a route to %s/%d via %s" % (
                address["address"], route["network"], route["prefix"],
                route["gateway"]
            ), response.json['error_message'])

    def test_bad_host_state(self):
        interface = dbutils.create_test_interface(
            ifname="test0",
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            forihostid=self.worker.id,
            ihost_uuid=self.worker.uuid)

        address = dbutils.create_test_address(
            interface_id=interface.id,
            name="enptest01",
            family=self.oam_subnet.version,
            address=str(self.oam_subnet[25]),
            prefix=self.oam_subnet.prefixlen)
        self.assertEqual(address["interface_id"], interface.id)

        # unlock the worker
        dbapi = dbutils.db_api.get_instance()
        worker = dbapi.ihost_update(self.worker.uuid, {
            "administrative": constants.ADMIN_UNLOCKED
        })
        self.assertEqual(worker['administrative'],
                         constants.ADMIN_UNLOCKED)

        response = self.delete(self.get_single_url(address.uuid),
                               headers=self.API_HEADERS,
                               expect_errors=True)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code,
                         http_client.INTERNAL_SERVER_ERROR)
        self.assertIn("administrative state = unlocked",
                      response.json['error_message'])

    def test_delete_address_from_pool(self):
        pool = dbutils.create_test_address_pool(
            name='testpool',
            network='192.168.204.0',
            ranges=[['192.168.204.2', '192.168.204.254']],
            prefix=24)
        address = dbutils.create_test_address(
            name="enptest01",
            family=4,
            address='192.168.204.4',
            prefix=24,
            address_pool_id=pool.id)
        self.assertEqual(address['pool_uuid'], pool.uuid)

        with mock.patch(
                'sysinv.common.utils.is_initial_config_complete', lambda: True):
            response = self.delete(self.get_single_url(address.uuid),
                                   headers=self.API_HEADERS,
                                   expect_errors=True)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code,
                         http_client.CONFLICT)
        self.assertIn("Address has been allocated from pool; "
                      "cannot be manually deleted",
                      response.json['error_message'])


class TestList(AddressTestCase):
@ -8,14 +8,33 @@
#

"""
Tests for the API /certificate_install/ methods.
Tests for the API /certificate_install/delete methods.
"""

import json
import mock
import os
import uuid as UUID
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from six.moves import http_client
from sysinv.api.controllers.v1 import certificate as cert_api
from sysinv.tests.api import base
from sysinv.tests.db import utils as dbutils


class FakeConductorAPI(object):

    def __init__(self):
        self.config_certificate = self.fake_config_certificate
        self.delete_certificate = mock.MagicMock()
        self.config_certificate_return = None

    def fake_config_certificate(self, context, pem, config_dict):
        return self.config_certificate_return

    def setup_config_certificate(self, data):
        self.config_certificate_return = data


class CertificateTestCase(base.FunctionalTest):

@ -137,3 +156,225 @@ class CertificateTestCase(base.FunctionalTest):

        result = cert_api._check_cert_dns_name(cert, 'x.example.com')
        self.assertIn("doesn't match", str(result))


class ApiCertificateTestCaseMixin(object):

    # API_HEADERS are a generic header passed to most API calls
    API_HEADERS = {'User-Agent': 'sysinv-test'}

    # API_PREFIX is the prefix for the URL
    API_PREFIX = '/certificate'

    # RESULT_KEY is the python table key for the list of results
    RESULT_KEY = 'certificates'

    # COMMON_FIELD is a field that is known to exist for inputs and outputs
    COMMON_FIELD = 'certificates'

    # expected_api_fields are attributes that should be populated by
    # an API query
    expected_api_fields = ['uuid']

    # hidden_api_fields are attributes that should not be populated by
    # an API query
    hidden_api_fields = []

    def setUp(self):
        super(ApiCertificateTestCaseMixin, self).setUp()
        self.fake_conductor_api = FakeConductorAPI()

        p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI')
        self.mock_conductor_api = p.start()
        self.mock_conductor_api.return_value = self.fake_conductor_api
        self.addCleanup(p.stop)

    def get_single_url(self, uuid):
        return '%s/%s' % (self.API_PREFIX, uuid)

    def _create_db_object(self, obj_id=None):
        return dbutils.create_test_certificate(
            id=obj_id, certtype='ssl_ca', signature='ssl_ca_123456789')

    @staticmethod
    def extract_certs_from_pem_file(certfile):
        """ extract certificates from an X509 PEM file
        """
        marker = b'-----BEGIN CERTIFICATE-----'
        with open(certfile, 'rb') as f:
            pem_contents = f.read()
        start = 0
        certs = []
        while True:
            index = pem_contents.find(marker, start)
            if index == -1:
                break
            cert = x509.load_pem_x509_certificate(pem_contents[index::],
                                                  default_backend())
            certs.append(cert)
            # bytes.find returns an absolute offset, so resume the scan just
            # past the marker that was found (adding 'start' in as well, as
            # the earlier version did, could skip certificates in files with
            # three or more of them).
            start = index + len(marker)
        return certs
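
A quick sanity check of the helper against the two-certificate test file
added in this change (a sketch, assuming the data path used by the tests):

    certfile = os.path.join(os.path.dirname(__file__), "data",
                            'ca-cert-two-certs.pem')
    assert len(self.extract_certs_from_pem_file(certfile)) == 2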

    @staticmethod
    def get_cert_signature(mode, cert):
        signature = mode + '_' + str(cert.serial_number)
        if len(signature) > 255:
            signature = signature[:255]
        return signature


class ApiCertificatePostTestSuite(ApiCertificateTestCaseMixin,
                                  base.FunctionalTest):
    """ Certificate post operations
    """
    def setUp(self):
        super(ApiCertificatePostTestSuite, self).setUp()
        self.create_test_isystem()

    def create_test_isystem(self):
        return dbutils.create_test_isystem(capabilities={'https_enabled': True})

    # Test successful POST operation to install 1 CA certificate
    def test_install_one_CA_certificate(self):
        mode = 'ssl_ca'
        certfile = os.path.join(os.path.dirname(__file__), "data",
                                'ca-cert-one-cert.pem')

        in_certs = self.extract_certs_from_pem_file(certfile)
        fake_config_certificate_return = []
        for in_cert in in_certs:
            fake_config_certificate_return.append(
                {'signature': self.get_cert_signature(mode, in_cert),
                 'not_valid_before': in_cert.not_valid_before,
                 'not_valid_after': in_cert.not_valid_after})
        self.fake_conductor_api.\
            setup_config_certificate(fake_config_certificate_return)

        data = {'mode': mode}
        files = [('file', certfile)]
        response = self.post_with_files('%s/%s' % (self.API_PREFIX, 'certificate_install'),
                                        data,
                                        upload_files=files,
                                        headers=self.API_HEADERS,
                                        expect_errors=False)

        self.assertEqual(response.status_code, http_client.OK)
        resp = json.loads(response.body)
        self.assertIn('certificates', resp)
        ret_certs = resp.get('certificates')
        self.assertEqual(len(in_certs), len(ret_certs))
        for ret_cert in ret_certs:
            self.assertIn('certtype', ret_cert)
            self.assertEqual(ret_cert.get('certtype'), mode)
            self.assertIn('signature', ret_cert)
            self.assertIn('start_date', ret_cert)
            self.assertIn('expiry_date', ret_cert)
            found_match = False
            for in_cert in in_certs:
                ret_cert_start_date = str(ret_cert.get('start_date'))
                ret_cert_start_date = ret_cert_start_date.replace('+00:00', '')
                ret_cert_expiry_date = str(ret_cert.get('expiry_date'))
                ret_cert_expiry_date = \
                    ret_cert_expiry_date.replace('+00:00', '')
                if ret_cert.get('signature') == \
                        self.get_cert_signature(mode, in_cert) and \
                        ret_cert_start_date == \
                        str(in_cert.not_valid_before) and \
                        ret_cert_expiry_date == \
                        str(in_cert.not_valid_after):
                    found_match = True
            self.assertTrue(found_match)

    # Test successful POST operation to install 2 CA certificates
    def test_install_two_CA_certificate(self):
        mode = 'ssl_ca'
        certfile = os.path.join(os.path.dirname(__file__), "data",
                                'ca-cert-two-certs.pem')

        in_certs = self.extract_certs_from_pem_file(certfile)
        fake_config_certificate_return = []
        for in_cert in in_certs:
            fake_config_certificate_return.append(
                {'signature': self.get_cert_signature(mode, in_cert),
                 'not_valid_before': in_cert.not_valid_before,
                 'not_valid_after': in_cert.not_valid_after})
        self.fake_conductor_api.\
            setup_config_certificate(fake_config_certificate_return)

        data = {'mode': mode}
        files = [('file', certfile)]
        response = self.post_with_files('%s/%s' % (self.API_PREFIX,
                                                   'certificate_install'),
                                        data,
                                        upload_files=files,
                                        headers=self.API_HEADERS,
                                        expect_errors=False)

        self.assertEqual(response.status_code, http_client.OK)
        resp = json.loads(response.body)
        self.assertIn('certificates', resp)
        ret_certs = resp.get('certificates')
        self.assertEqual(len(in_certs), len(ret_certs))
        for ret_cert in ret_certs:
            self.assertIn('certtype', ret_cert)
            self.assertEqual(ret_cert.get('certtype'), mode)
            self.assertIn('signature', ret_cert)
            self.assertIn('start_date', ret_cert)
            self.assertIn('expiry_date', ret_cert)
            found_match = False
            for in_cert in in_certs:
                ret_cert_start_date = str(ret_cert.get('start_date'))
                ret_cert_start_date = ret_cert_start_date.replace('+00:00', '')
                ret_cert_expiry_date = str(ret_cert.get('expiry_date'))
                ret_cert_expiry_date = \
                    ret_cert_expiry_date.replace('+00:00', '')
                if ret_cert.get('signature') == \
                        self.get_cert_signature(mode, in_cert) and \
                        ret_cert_start_date == \
                        str(in_cert.not_valid_before) and \
                        ret_cert_expiry_date == \
                        str(in_cert.not_valid_after):
                    found_match = True
            self.assertTrue(found_match)


class ApiCertificateDeleteTestSuite(ApiCertificateTestCaseMixin,
                                    base.FunctionalTest):
    """ Certificate delete operations
    """
    def setUp(self):
        super(ApiCertificateDeleteTestSuite, self).setUp()
        self.delete_object = self._create_db_object()

    # Test successful CA certificate DELETE operation
    def test_delete_ca_certificate(self):
        uuid = self.delete_object.uuid
        certtype = self.delete_object.certtype
        signature = self.delete_object.signature
        response = self.delete(self.get_single_url(uuid),
                               headers=self.API_HEADERS,
                               expect_errors=False)

        self.assertEqual(response.status_code, http_client.OK)
        self.assertTrue(response.body)
        resp = json.loads(response.body)
        self.assertIn('uuid', resp)
        self.assertEqual(uuid, resp.get('uuid'))
        self.assertIn('certtype', resp)
        self.assertEqual(certtype, resp.get('certtype'))
        self.assertIn('signature', resp)
        self.assertEqual(signature, resp.get('signature'))

    # Test CA certificate DELETE operation, no certificate found
    def test_delete_ca_certificate_not_found(self):
        uuid = UUID.uuid4()
        response = self.delete(self.get_single_url(uuid),
                               headers=self.API_HEADERS,
                               expect_errors=True)

        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertTrue(response.body)
        resp = json.loads(response.body)
        self.assertTrue(resp.get('error_message'))
        fault_string_expected = 'No certificate found for %s' % uuid
        self.assertIn(fault_string_expected, str(resp.get('error_message')))

@ -0,0 +1,652 @@
|
|||
#
|
||||
# Copyright (c) 2020 Wind River Systems, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
|
||||
"""
|
||||
Tests for the API / controller-fs / methods.
|
||||
"""
|
||||
|
||||
import mock
|
||||
from six.moves import http_client
|
||||
from sysinv.tests.api import base
|
||||
from sysinv.tests.db import base as dbbase
|
||||
from sysinv.tests.db import utils as dbutils
|
||||
|
||||
|
||||
class FakeConductorAPI(object):
|
||||
|
||||
def __init__(self):
|
||||
self.get_controllerfs_lv_sizes = mock.MagicMock()
|
||||
self.update_storage_config = mock.MagicMock()
|
||||


class FakeException(Exception):
    pass


class ApiControllerFSTestCaseMixin(base.FunctionalTest,
                                   dbbase.ControllerHostTestCase):

    # API_HEADERS is a generic set of headers passed to most API calls
    API_HEADERS = {'User-Agent': 'sysinv-test'}

    # API_PREFIX is the prefix for the URL
    API_PREFIX = '/controller_fs'

    # RESULT_KEY is the key in the response body that holds the list of results
    RESULT_KEY = 'controller_fs'

    # expected_api_fields are attributes that should be populated by
    # an API query
    expected_api_fields = ['logical_volume',
                           'uuid',
                           'links',
                           'created_at',
                           'updated_at',
                           'name',
                           'state',
                           'isystem_uuid',
                           'replicated',
                           'forisystemid',
                           'size']

    # hidden_api_fields are attributes that should not be populated by
    # an API query
    hidden_api_fields = ['forisystemid']

    def setUp(self):
        super(ApiControllerFSTestCaseMixin, self).setUp()
        self.controller_fs_first = self._create_db_object('platform',
                                                          10,
                                                          'platform-lv')
        self.controller_fs_second = self._create_db_object('database',
                                                           5,
                                                           'pgsql-lv')
        self.controller_fs_third = self._create_db_object('extension',
                                                          1,
                                                          'extension-lv')
        self.fake_conductor_api = FakeConductorAPI()
        p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI')
        self.mock_conductor_api = p.start()
        self.mock_conductor_api.return_value = self.fake_conductor_api
        self.addCleanup(p.stop)

    def get_show_url(self, uuid):
        return '%s/%s' % (self.API_PREFIX, uuid)

    def get_detail_url(self):
        return '%s/detail' % (self.API_PREFIX)

    def get_update_url(self, system_uuid):
        return '/isystems/%s/controller_fs/update_many' % (system_uuid)
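
    # The update_many endpoint accepts a list of JSON-patch documents,
    # one per filesystem, so a single PUT request can resize several
    # filesystems together (see the PUT tests below).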

    def get_sorted_list_url(self, sort_attr, sort_dir):
        return '%s/?sort_key=%s&sort_dir=%s' % (self.API_PREFIX, sort_attr,
                                                sort_dir)

    def _create_db_object(self, controller_fs_name, controller_fs_size,
                          controller_lv, obj_id=None):
        return dbutils.create_test_controller_fs(id=obj_id,
                                                 uuid=None,
                                                 name=controller_fs_name,
                                                 forisystemid=self.system.id,
                                                 state='available',
                                                 size=controller_fs_size,
                                                 logical_volume=controller_lv,
                                                 replicated=True,
                                                 isystem_uuid=self.system.uuid)
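
# The three filesystems created in setUp() (platform/10, database/5,
# extension/1; sizes assumed to be in GiB) are the fixtures every suite
# below asserts against.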


class ApiControllerFSListTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem List GET operations
    """
    def setUp(self):
        super(ApiControllerFSListTestSuiteMixin, self).setUp()

    def test_success_fetch_controller_fs_list(self):
        response = self.get_json(self.API_PREFIX, headers=self.API_HEADERS)

        # Verify the values of the response with the values stored in database
        result_one = response[self.RESULT_KEY][0]
        result_two = response[self.RESULT_KEY][1]
        self.assertTrue(result_one['name'] == self.controller_fs_first.name or
                        result_two['name'] == self.controller_fs_first.name)
        self.assertTrue(result_one['name'] == self.controller_fs_second.name or
                        result_two['name'] == self.controller_fs_second.name)

    def test_success_fetch_controller_fs_sorted_list(self):
        response = self.get_json(self.get_sorted_list_url('name', 'asc'))

        # Verify the values of the response are returned in a sorted order
        result_one = response[self.RESULT_KEY][0]
        result_two = response[self.RESULT_KEY][1]
        result_three = response[self.RESULT_KEY][2]
        self.assertEqual(result_one['name'], self.controller_fs_second.name)
        self.assertEqual(result_two['name'], self.controller_fs_third.name)
        self.assertEqual(result_three['name'], self.controller_fs_first.name)
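
    # Ascending sort by name yields database < extension < platform, i.e.
    # the second, third, then first of the objects created in setUp().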


class ApiControllerFSShowTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem Show GET operations
    """
    def setUp(self):
        super(ApiControllerFSShowTestSuiteMixin, self).setUp()

    def test_fetch_controller_fs_object(self):
        url = self.get_show_url(self.controller_fs_first.uuid)
        response = self.get_json(url)
        # Verify the values of the response with the values stored in database
        # (assertEqual is used here: assertTrue(a, b) would treat the second
        # argument as a failure message and never compare the two values)
        self.assertEqual(response['name'], self.controller_fs_first.name)
        self.assertEqual(response['logical_volume'],
                         self.controller_fs_first.logical_volume)
        self.assertEqual(response['state'], self.controller_fs_first.state)
        self.assertEqual(response['replicated'],
                         self.controller_fs_first.replicated)
        self.assertEqual(response['size'], self.controller_fs_first.size)
        self.assertEqual(response['uuid'], self.controller_fs_first.uuid)


class ApiControllerFSPutTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem Put operations
    """

    def setUp(self):
        super(ApiControllerFSPutTestSuiteMixin, self).setUp()
        self.fake_lv_size = self.fake_conductor_api.get_controllerfs_lv_sizes
        p = mock.patch(
            'sysinv.api.controllers.v1.utils.is_host_state_valid_for_fs_resize')
        self.mock_is_host_state_valid_for_fs_resize = p.start()
        self.mock_is_host_state_valid_for_fs_resize.return_value = True
        self.addCleanup(p.stop)

    def exception_controller_fs(self):
        print('Raised a fake exception')
        raise FakeException

    def test_put_duplicate_fs_name(self):
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Duplicate fs_name 'extension' in parameter list",
                      response.json['error_message'])

    def test_put_invalid_fs_name(self):
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "invalid_name",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("ControllerFs update failed: invalid filesystem",
                      response.json['error_message'])

    def test_put_invalid_fs_size(self):
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "invalid_size",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "4",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("ControllerFs update failed: filesystem 'extension' "
                      "size must be an integer", response.json['error_message'])

    def test_put_smaller_than_existing_fs_size(self):
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "4",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("ControllerFs update failed: size for "
                      "filesystem 'database' should be bigger than 5",
                      response.json['error_message'])

    @mock.patch('sysinv.api.controllers.v1.utils.is_drbd_fs_resizing')
    def test_put_drbd_sync_error(self, is_drbd_fs_resizing):
        is_drbd_fs_resizing.return_value = True
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "4",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("A drbd sync operation is currently in progress. "
                      "Retry again later.",
                      response.json['error_message'])

    def test_put_size_not_found(self):
        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Unable to determine the current size of pgsql-lv. "
                      "Rejecting modification request.",
                      response.json['error_message'])

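    # The conductor-reported sizes below raise platform-lv to 16 while the
    # database row still says 10, so the API appears to treat the reported
    # 16 as the floor for 'platform' and rejects the request.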
    def test_put_minimum_size(self):
        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 16}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("'platform' must be at least: 16",
                      response.json['error_message'])

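    # No 'backup' host filesystem exists at this point, so its size reads
    # as 0 and the API derives the required minimum (21) from the platform
    # and database sizes quoted in the expected error message.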
    def test_put_insufficient_backup_size(self):
        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("backup size of 0 is insufficient for host controller-0. "
                      "Minimum backup size of 21 is required based upon "
                      "platform size 10 and database size 6. "
                      "Rejecting modification request.",
                      response.json['error_message'])

    def test_put_unprovisioned_physical_volume(self):
        # Create an unprovisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=1,
                               pv_state='unprovisioned')

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Cannot resize filesystem. There are still "
                      "unprovisioned physical volumes on controller-0.",
                      response.json['error_message'])

    def test_put_exceed_growth_limit(self):
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=1,
                               pv_state='provisioned')
        # Create a logical volume group
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id,
                                lvm_vg_size=200,
                                lvm_vg_free_pe=50)

        # Create a host filesystem
        dbutils.create_test_host_fs(name='backup',
                                    forihostid=self.host.id)

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Total target growth size 9 GiB "
                      "exceeds growth limit of 0 GiB.",
                      response.json['error_message'])

    def test_put_update_exception(self):
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')

        # Create a logical volume group
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id)

        # Create a host filesystem
        dbutils.create_test_host_fs(name='backup',
                                    forihostid=self.host.id)

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        # Make the conductor update call raise a fake exception
        fake_update = self.fake_conductor_api.update_storage_config
        fake_update.side_effect = self.exception_controller_fs

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Failed to update filesystem size",
                      response.json['error_message'])

    def test_put_success(self):
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')

        # Create a logical volume group
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id)

        # Create a host filesystem
        dbutils.create_test_host_fs(name='backup',
                                    forihostid=self.host.id)

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify a NO CONTENT response is given
        self.assertEqual(response.status_code, http_client.NO_CONTENT)


class ApiControllerFSDetailTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem detail operations
    """
    def setUp(self):
        super(ApiControllerFSDetailTestSuiteMixin, self).setUp()

    # Test that the detail GET returns every filesystem with its attributes
    def test_success_detail(self):
        response = self.get_json(self.get_detail_url(),
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        self.assertEqual(response.status_code, http_client.OK)
        result_one = response.json[self.RESULT_KEY][0]
        result_two = response.json[self.RESULT_KEY][1]
        result_three = response.json[self.RESULT_KEY][2]

        # Response object 1
        self.assertEqual(result_one['size'], self.controller_fs_first.size)
        self.assertEqual(result_one['isystem_uuid'],
                         self.controller_fs_first.isystem_uuid)
        self.assertEqual(result_one['name'], self.controller_fs_first.name)
        self.assertEqual(result_one['logical_volume'],
                         self.controller_fs_first.logical_volume)
        self.assertEqual(result_one['forisystemid'],
                         self.controller_fs_first.forisystemid)
        self.assertIsNone(result_one['action'])
        self.assertEqual(result_one['uuid'], self.controller_fs_first.uuid)
        self.assertEqual(result_one['state'], self.controller_fs_first.state)
        self.assertEqual(result_one['replicated'],
                         self.controller_fs_first.replicated)

        # Response object 2
        self.assertEqual(result_two['size'], self.controller_fs_second.size)
        self.assertEqual(result_two['isystem_uuid'],
                         self.controller_fs_second.isystem_uuid)
        self.assertEqual(result_two['name'], self.controller_fs_second.name)
        self.assertEqual(result_two['logical_volume'],
                         self.controller_fs_second.logical_volume)
        self.assertEqual(result_two['forisystemid'],
                         self.controller_fs_second.forisystemid)
        self.assertIsNone(result_two['action'])
        self.assertEqual(result_two['uuid'], self.controller_fs_second.uuid)
        self.assertEqual(result_two['state'], self.controller_fs_second.state)
        self.assertEqual(result_two['replicated'],
                         self.controller_fs_second.replicated)

        # Response object 3
        self.assertEqual(result_three['size'], self.controller_fs_third.size)
        self.assertEqual(result_three['isystem_uuid'],
                         self.controller_fs_third.isystem_uuid)
        self.assertEqual(result_three['name'], self.controller_fs_third.name)
        self.assertEqual(result_three['logical_volume'],
                         self.controller_fs_third.logical_volume)
        self.assertEqual(result_three['forisystemid'],
                         self.controller_fs_third.forisystemid)
        self.assertIsNone(result_three['action'])
        self.assertEqual(result_three['uuid'], self.controller_fs_third.uuid)
        self.assertEqual(result_three['state'], self.controller_fs_third.state)
        self.assertEqual(result_three['replicated'],
                         self.controller_fs_third.replicated)


class ApiControllerFSPatchTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem patch operations
    """
    def setUp(self):
        super(ApiControllerFSPatchTestSuiteMixin, self).setUp()

    # Test that a valid PATCH operation is blocked by the API;
    # the API should reject it with 403 FORBIDDEN
    def test_patch_not_allowed(self):
        uuid = self.controller_fs_third.uuid
        response = self.patch_json(self.get_show_url(uuid),
                                   [{"path": "/name",
                                     "value": "extension",
                                     "op": "replace"},
                                    {"path": "/size",
                                     "value": "2",
                                     "op": "replace"}],
                                   headers=self.API_HEADERS,
                                   expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])


class ApiControllerFSDeleteTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem delete operations
    """
    def setUp(self):
        super(ApiControllerFSDeleteTestSuiteMixin, self).setUp()

    # Test that a valid DELETE operation is blocked by the API;
    # the API should reject it with 403 FORBIDDEN
    def test_delete_not_allowed(self):
        uuid = self.controller_fs_third.uuid
        response = self.delete(self.get_show_url(uuid),
                               headers=self.API_HEADERS,
                               expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])


class ApiControllerFSPostTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem post operations
    """
    def setUp(self):
        super(ApiControllerFSPostTestSuiteMixin, self).setUp()

    # Test that a valid POST operation is blocked by the API;
    # the API should reject it with 403 FORBIDDEN
    def test_post_not_allowed(self):
        response = self.post_json(self.API_PREFIX,
                                  {'name': 'platform-new',
                                   'size': 10,
                                   'logical_volume': 'platform-lv'},
                                  headers=self.API_HEADERS,
                                  expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])

@@ -9,7 +9,6 @@ Tests for the API / dns / methods.
"""

import mock
import unittest
from six.moves import http_client
from sysinv.tests.api import base
from sysinv.tests.db import base as dbbase

@@ -123,7 +122,6 @@ class ApiDNSPatchTestSuiteMixin(ApiDNSTestCaseMixin):

    def setUp(self):
        super(ApiDNSPatchTestSuiteMixin, self).setUp()
        self.patch_object = self._create_db_object()
        if self.is_ipv4:
            self.patch_value_no_change = '8.8.8.8,8.8.4.4'
            self.patch_value_changed = '8.8.8.8'

@@ -135,6 +133,7 @@ class ApiDNSPatchTestSuiteMixin(ApiDNSTestCaseMixin):
            self.patch_value_more_than_permitted = '2001:4860:4860::8888,2001:4860:4860::8844,'\
                                                   '2001:4860:4860::4444,2001:4860:4860::8888'
            self.patch_value_hostname = "dns.google"
        self.patch_object = self._create_db_object()

    def exception_dns(self):
        print('Raised a fake exception')

@@ -282,12 +281,32 @@ class ApiDNSListTestSuiteMixin(ApiDNSTestCaseMixin):
        self.assertEqual(response[self.RESULT_KEY][0]['uuid'], self.dns_uuid)


# ============= IPv4 environment tests ==============
# Tests DNS Api operations for a Controller (defaults to IPv4)
class PlatformIPv4ControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin,
                                                base.FunctionalTest,
                                                dbbase.ControllerHostTestCase):
    pass
    def test_patch_ip_version_mismatch(self):
        self.is_ipv4 = True
        self.patch_object = self._create_db_object()
        self.patch_value_no_change = '2001:4860:4860::8888,2001:4860:4860::8844'
        self.patch_value_changed = '2001:4860:4860::8888'
        self.patch_value_more_than_permitted = '2001:4860:4860::8888,2001:4860:4860::8844,'\
                                               '2001:4860:4860::4444,2001:4860:4860::8888'
        self.patch_value_hostname = "dns.google"

        # Update value of patchable field
        response = self.patch_json(self.get_single_url(self.patch_object.uuid),
                                   [{'path': self.patch_path_nameserver,
                                     'value': self.patch_value_changed,
                                     'op': 'replace'},
                                    {"path": self.patch_path_action,
                                     "value": "apply",
                                     "op": "replace"}],
                                   headers=self.API_HEADERS,
                                   expect_errors=True)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        expected_msg = "IP version mismatch: was expecting IPv4, IPv6 received"
        self.assertIn(expected_msg, response.json['error_message'])


class PlatformIPv4ControllerApiDNSListTestCase(ApiDNSListTestSuiteMixin,

@@ -314,7 +333,28 @@ class PlatformIPv6ControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin,
                                                dbbase.BaseIPv6Mixin,
                                                base.FunctionalTest,
                                                dbbase.ControllerHostTestCase):
    pass
    def test_patch_ip_version_mismatch(self):
        self.is_ipv4 = False
        self.patch_object = self._create_db_object()
        self.patch_value_no_change = '8.8.8.8,8.8.4.4'
        self.patch_value_changed = '8.8.8.8'
        self.patch_value_more_than_permitted = '8.8.8.8,8.8.4.4,9.9.9.9,9.8.8.9'
        self.patch_value_hostname = "dns.google"

        # Update value of patchable field
        response = self.patch_json(self.get_single_url(self.patch_object.uuid),
                                   [{'path': self.patch_path_nameserver,
                                     'value': self.patch_value_changed,
                                     'op': 'replace'},
                                    {"path": self.patch_path_action,
                                     "value": "apply",
                                     "op": "replace"}],
                                   headers=self.API_HEADERS,
                                   expect_errors=True)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        expected_msg = "IP version mismatch: was expecting IPv6, IPv4 received"
        self.assertIn(expected_msg, response.json['error_message'])


class PlatformIPv6ControllerApiDNSListTestCase(ApiDNSListTestSuiteMixin,

@@ -336,67 +376,3 @@ class PlatformIPv6ControllerApiDNSDeleteTestCase(ApiDNSDeleteTestSuiteMixin,
                                                 base.FunctionalTest,
                                                 dbbase.ControllerHostTestCase):
    pass


# ============= IPv6 DNS in IPv4 environment tests ==============
class PlatformIPv6inIPv4OAMControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin,
                                                         base.FunctionalTest,
                                                         dbbase.ControllerHostTestCase):
    def setUp(self):
        super(PlatformIPv6inIPv4OAMControllerApiDNSPatchTestCase, self).setUp()
        self.is_ipv4 = False
        self.patch_object = self._create_db_object()
        self.patch_value_no_change = '2001:4860:4860::8888,2001:4860:4860::8844'
        self.patch_value_changed = '2001:4860:4860::8888'
        self.patch_value_more_than_permitted = '2001:4860:4860::8888,2001:4860:4860::8844,'\
                                               '2001:4860:4860::4444,2001:4860:4860::8888'
        self.patch_value_hostname = "dns.google"

    # See https://bugs.launchpad.net/starlingx/+bug/1860489
    @unittest.expectedFailure
    def test_patch_valid_change(self):
        # Update value of patchable field
        response = self.patch_json(self.get_single_url(self.patch_object.uuid),
                                   [{'path': self.patch_path_nameserver,
                                     'value': self.patch_value_changed,
                                     'op': 'replace'},
                                    {"path": self.patch_path_action,
                                     "value": "apply",
                                     "op": "replace"}],
                                   headers=self.API_HEADERS)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)

        pass


# ============= IPv4 DNS in IPv6 environment tests ==============
class PlatformIPv4inIPv6ControllerApiDNSPatchTestCase(ApiDNSPatchTestSuiteMixin,
                                                      dbbase.BaseIPv6Mixin,
                                                      base.FunctionalTest,
                                                      dbbase.ControllerHostTestCase):
    def setUp(self):
        super(PlatformIPv4inIPv6ControllerApiDNSPatchTestCase, self).setUp()
        self.is_ipv4 = False
        self.patch_object = self._create_db_object()
        self.patch_value_no_change = '8.8.8.8,8.8.4.4'
        self.patch_value_changed = '8.8.8.8'
        self.patch_value_more_than_permitted = '8.8.8.8,8.8.4.4,9.9.9.9,9.8.8.9'
        self.patch_value_hostname = "dns.google"

    # See https://bugs.launchpad.net/starlingx/+bug/1860489
    @unittest.expectedFailure
    def test_patch_valid_change(self):
        # Update value of patchable field
        response = self.patch_json(self.get_single_url(self.patch_object.uuid),
                                   [{'path': self.patch_path_nameserver,
                                     'value': self.patch_value_changed,
                                     'op': 'replace'},
                                    {"path": self.patch_path_action,
                                     "value": "apply",
                                     "op": "replace"}],
                                   headers=self.API_HEADERS)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)

        pass

@@ -955,6 +955,42 @@ class TestPostKubeUpgrades(TestHost):
        self.assertIn("control plane on this host is already being upgraded",
                      result.json['error_message'])

    def test_kube_upgrade_first_control_plane_after_networking_upgraded(self):
        # Test re-upgrading kubernetes first control plane after
        # networking was upgraded

        # Create controller-0
        self._create_controller_0(
            invprovision=constants.PROVISIONED,
            administrative=constants.ADMIN_UNLOCKED,
            operational=constants.OPERATIONAL_ENABLED,
            availability=constants.AVAILABILITY_ONLINE)

        # Create the upgrade
        dbutils.create_test_kube_upgrade(
            from_version='v1.42.1',
            to_version='v1.42.2',
            state=kubernetes.KUBE_UPGRADED_NETWORKING,
        )

        # The control plane on this host was already upgraded
        # to the new version
        self.kube_get_control_plane_versions_result = {
            'controller-0': 'v1.42.2',
            'controller-1': 'v1.42.1'}

        # Upgrade the first control plane
        result = self.post_json(
            '/ihosts/controller-0/kube_upgrade_control_plane',
            {}, headers={'User-Agent': 'sysinv-test'},
            expect_errors=True)

        # Verify the failure
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
        self.assertTrue(result.json['error_message'])
        self.assertIn("The first control plane was already upgraded",
                      result.json['error_message'])

    def test_kube_upgrade_kubelet_controller_0(self):
        # Test upgrading kubernetes kubelet on controller-0

@@ -1978,6 +2014,64 @@ class TestPatch(TestHost):
        result = self.get_json('/ihosts/%s' % c1_host['hostname'])
        self.assertEqual(constants.NONE_ACTION, result['action'])

    def test_unlock_action_controller_while_upgrading_kubelet(self):
        # Create controller-0
        c0_host = self._create_controller_0(
            invprovision=constants.PROVISIONED,
            administrative=constants.ADMIN_LOCKED,
            operational=constants.OPERATIONAL_ENABLED,
            availability=constants.AVAILABILITY_ONLINE)
        self._create_test_host_platform_interface(c0_host)

        # Create a kube upgrade
        dbutils.create_test_kube_upgrade(
            from_version='v1.42.1',
            to_version='v1.42.2',
            state=kubernetes.KUBE_UPGRADING_KUBELETS,
        )

        # Mark the kube host as kubelet upgrading
        values = {'status': kubernetes.KUBE_HOST_UPGRADING_KUBELET}
        self.dbapi.kube_host_upgrade_update(1, values)

        # Unlock host
        response = self._patch_host_action(c0_host['hostname'],
                                           constants.UNLOCK_ACTION,
                                           'sysinv-test',
                                           expect_errors=True)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertIn("Can not unlock controller-0 while upgrading "
                      "kubelet", response.json['error_message'])

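    # The force variant below is expected to bypass the kubelet-upgrade
    # guard that the plain unlock above trips over, hence HTTP 200.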
    def test_force_unlock_action_controller_while_upgrading_kubelet(self):
        # Create controller-0
        c0_host = self._create_controller_0(
            invprovision=constants.PROVISIONED,
            administrative=constants.ADMIN_LOCKED,
            operational=constants.OPERATIONAL_ENABLED,
            availability=constants.AVAILABILITY_ONLINE)
        self._create_test_host_platform_interface(c0_host)

        # Create a kube upgrade
        dbutils.create_test_kube_upgrade(
            from_version='v1.42.1',
            to_version='v1.42.2',
            state=kubernetes.KUBE_UPGRADING_KUBELETS,
        )

        # Mark the kube host as kubelet upgrading
        values = {'status': kubernetes.KUBE_HOST_UPGRADING_KUBELET}
        self.dbapi.kube_host_upgrade_update(1, values)

        # Force unlock host
        response = self._patch_host_action(c0_host['hostname'],
                                           constants.FORCE_UNLOCK_ACTION,
                                           'sysinv-test',
                                           expect_errors=True)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)

    def test_unlock_action_worker(self):
        # Create controller-0
        self._create_controller_0(

@@ -0,0 +1,526 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Tests for the API / host-fs / methods.
"""

import mock
import uuid
from six.moves import http_client
from sysinv.tests.api import base
from sysinv.tests.db import base as dbbase
from sysinv.tests.db import utils as dbutils
from sysinv.common import constants


class FakeConductorAPI(object):

    def __init__(self):
        self.get_controllerfs_lv_sizes = mock.MagicMock()
        self.update_host_filesystem_config = mock.MagicMock()


class FakeException(Exception):
    pass
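
# As in the controller-fs tests, the conductor RPC layer is replaced with a
# fake; test_put_update_exception later overrides
# update_host_filesystem_config to simulate a conductor-side failure.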


class ApiHostFSTestCaseMixin(base.FunctionalTest,
                             dbbase.ControllerHostTestCase):

    # API_HEADERS is a generic set of headers passed to most API calls
    API_HEADERS = {'User-Agent': 'sysinv-test'}

    # API_PREFIX is the prefix for the URL
    API_PREFIX = '/ihosts'

    # RESULT_KEY is the key in the response body that holds the list of results
    RESULT_KEY = 'host_fs'

    def setUp(self):
        super(ApiHostFSTestCaseMixin, self).setUp()
        self.host_fs_first = self._create_db_object('scratch',
                                                    8,
                                                    'scratch-lv')
        self.host_fs_second = self._create_db_object('backup',
                                                     20,
                                                     'backup-lv')
        self.host_fs_third = self._create_db_object('docker',
                                                    30,
                                                    'docker-lv')
        self.fake_conductor_api = FakeConductorAPI()
        p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI')
        self.mock_conductor_api = p.start()
        self.mock_conductor_api.return_value = self.fake_conductor_api
        self.addCleanup(p.stop)

    def get_list_url(self, host_uuid):
        return '%s/%s/host_fs' % (self.API_PREFIX, host_uuid)

    def get_single_fs_url(self, host_fs_uuid):
        return '/host_fs/%s' % (host_fs_uuid)

    def get_post_url(self):
        return '/host_fs'

    def get_detail_url(self):
        return '/host_fs/detail'

    def get_update_many_url(self, host_uuid):
        return '%s/%s/host_fs/update_many' % (self.API_PREFIX, host_uuid)

    def get_sorted_list_url(self, host_uuid, sort_attr, sort_dir):
        return '%s/%s/host_fs/?sort_key=%s&sort_dir=%s' % (self.API_PREFIX,
                                                           host_uuid,
                                                           sort_attr,
                                                           sort_dir)

    def _create_db_object(self, host_fs_name, host_fs_size,
                          host_lv, obj_id=None):
        return dbutils.create_test_host_fs(id=obj_id,
                                           uuid=None,
                                           name=host_fs_name,
                                           forihostid=self.host.id,
                                           size=host_fs_size,
                                           logical_volume=host_lv)


class ApiHostFSListTestSuiteMixin(ApiHostFSTestCaseMixin):
    """ Host FileSystem List GET operations
    """
    def setUp(self):
        super(ApiHostFSListTestSuiteMixin, self).setUp()

    def test_success_fetch_host_fs_list(self):
        response = self.get_json(self.get_list_url(self.host.uuid),
                                 headers=self.API_HEADERS)

        # Verify the values of the response with the values stored in database
        result_one = response[self.RESULT_KEY][0]
        result_two = response[self.RESULT_KEY][1]
        self.assertTrue(result_one['name'] == self.host_fs_first.name or
                        result_two['name'] == self.host_fs_first.name)
        self.assertTrue(result_one['name'] == self.host_fs_second.name or
                        result_two['name'] == self.host_fs_second.name)

    def test_success_fetch_host_fs_sorted_list(self):
        response = self.get_json(self.get_sorted_list_url(self.host.uuid,
                                                          'name',
                                                          'asc'))

        # Verify the values of the response are returned in a sorted order
        result_one = response[self.RESULT_KEY][0]
        result_two = response[self.RESULT_KEY][1]
        result_three = response[self.RESULT_KEY][2]
        self.assertEqual(result_one['name'], self.host_fs_second.name)
        self.assertEqual(result_two['name'], self.host_fs_third.name)
        self.assertEqual(result_three['name'], self.host_fs_first.name)

    def test_fetch_list_invalid_host(self):
        # Generate a random uuid
        random_uuid = uuid.uuid1()
        response = self.get_json(self.get_list_url(random_uuid),
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify that no host fs is returned for a non-existent host UUID
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.OK)
        self.assertEqual(response.json['host_fs'], [])


class ApiHostFSShowTestSuiteMixin(ApiHostFSTestCaseMixin):
    """ Host FileSystem Show GET operations
    """
    def setUp(self):
        super(ApiHostFSShowTestSuiteMixin, self).setUp()

    def test_fetch_host_fs_object(self):
        url = self.get_single_fs_url(self.host_fs_first.uuid)
        response = self.get_json(url)

        # Verify the values of the response with the values stored in database
        # (assertEqual is used here: assertTrue(a, b) would treat the second
        # argument as a failure message and never compare the two values)
        self.assertEqual(response['name'], self.host_fs_first.name)
        self.assertEqual(response['logical_volume'],
                         self.host_fs_first.logical_volume)
        self.assertEqual(response['size'], self.host_fs_first.size)
        self.assertEqual(response['uuid'], self.host_fs_first.uuid)
        self.assertEqual(response['ihost_uuid'], self.host.uuid)


class ApiHostFSPatchSingleTestSuiteMixin(ApiHostFSTestCaseMixin):
    """ Individual Host FileSystem Patch operations
    """

    def setUp(self):
        super(ApiHostFSPatchSingleTestSuiteMixin, self).setUp()

    def test_individual_patch_not_allowed(self):
        url = self.get_single_fs_url(self.host_fs_first.uuid)
        response = self.patch_json(url,
                                   [],
                                   headers=self.API_HEADERS,
                                   expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted.",
                      response.json['error_message'])


class ApiHostFSPutTestSuiteMixin(ApiHostFSTestCaseMixin):
    """ Host FileSystem Put operations
    """

    def setUp(self):
        super(ApiHostFSPutTestSuiteMixin, self).setUp()

    def exception_host_fs(self):
        raise FakeException

    def test_put_invalid_fs_name(self):
        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "invalid",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "10",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "100",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("HostFs update failed: invalid filesystem 'invalid'",
                      response.json['error_message'])

    def test_put_invalid_fs_size(self):
        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "invalid_size",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "backup",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "100",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("HostFs update failed: filesystem 'scratch' "
                      "size must be an integer", response.json['error_message'])

    def test_put_smaller_than_existing_fs_size(self):
        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "7",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "backup",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "100",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("HostFs update failed: size for filesystem 'scratch' "
                      "should be bigger than 8", response.json['error_message'])

    def test_put_unprovisioned_physical_volume(self):
        # Create an unprovisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='unprovisioned')

        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "10",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "backup",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "100",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("There are still unprovisioned physical volumes "
                      "on 'controller-0'. Cannot perform operation.",
                      response.json['error_message'])

    def test_put_not_enough_space(self):
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')
        # Create a logical volume group
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id,
                                lvm_vg_size=200,
                                lvm_vg_free_pe=50)

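        # The volume group above reports no usable free space, while the
        # patch asks to grow scratch 8->10 (+2) and backup 20->100 (+80),
        # i.e. the 82 GiB total increase quoted in the expected error.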
        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "10",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "backup",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "100",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("HostFs update failed: Not enough free space on "
                      "cgts-vg. Current free space 0 GiB, requested total "
                      "increase 82 GiB", response.json['error_message'])

    def test_put_success_with_unprovisioned_host(self):
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')

        # Create a logical volume group
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id)

        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "10",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "backup",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "21",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify a NO CONTENT response is given
        self.assertEqual(response.status_code, http_client.NO_CONTENT)

    def test_put_success_with_provisioned_host(self):
        # Create a provisioned host
        self.host = self._create_test_host(personality=constants.CONTROLLER,
                                           unit=1,
                                           invprovision=constants.PROVISIONED)

        # Add host filesystems for the new host
        self.host_fs_first = self._create_db_object('scratch',
                                                    8,
                                                    'scratch-lv')
        self.host_fs_second = self._create_db_object('backup',
                                                     20,
                                                     'backup-lv')
        self.host_fs_third = self._create_db_object('docker',
                                                    30,
                                                    'docker-lv')

        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')

        # Create a logical volume group
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id)

        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "10",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "backup",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "21",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify a NO CONTENT response is given
        self.assertEqual(response.status_code, http_client.NO_CONTENT)

    def test_put_update_exception(self):
        # Create a provisioned host
        self.host = self._create_test_host(personality=constants.CONTROLLER,
                                           unit=1,
                                           invprovision=constants.PROVISIONED)

        # Add host filesystems for the new host
        self.host_fs_first = self._create_db_object('scratch',
                                                    8,
                                                    'scratch-lv')
        self.host_fs_second = self._create_db_object('backup',
                                                     20,
                                                     'backup-lv')
        self.host_fs_third = self._create_db_object('docker',
                                                    30,
                                                    'docker-lv')

        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')

        # Create a logical volume group
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id)

        # Make the conductor update call raise a fake exception
        fake_update = self.fake_conductor_api.update_host_filesystem_config
        fake_update.side_effect = self.exception_host_fs

        response = self.put_json(self.get_update_many_url(self.host.uuid),
                                 [[{"path": "/name",
                                    "value": "scratch",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "10",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "backup",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "21",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Failed to update filesystem size for controller-1",
                      response.json['error_message'])


class ApiHostFSDetailTestSuiteMixin(ApiHostFSTestCaseMixin):
    """ Host FileSystem detail operations
    """
    def setUp(self):
        super(ApiHostFSDetailTestSuiteMixin, self).setUp()

    # Test that the detail GET returns every filesystem with its attributes
    def test_success_detail(self):
        response = self.get_json(self.get_detail_url(),
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        self.assertEqual(response.status_code, http_client.OK)
        result_one = response.json[self.RESULT_KEY][0]
        result_two = response.json[self.RESULT_KEY][1]
        result_three = response.json[self.RESULT_KEY][2]

        # Response object 1
        self.assertEqual(result_one['size'], self.host_fs_first.size)
        self.assertEqual(result_one['name'], self.host_fs_first.name)
        self.assertEqual(result_one['logical_volume'],
                         self.host_fs_first.logical_volume)
        self.assertEqual(result_one['ihost_uuid'], self.host.uuid)
        self.assertEqual(result_one['uuid'], self.host_fs_first.uuid)

        # Response object 2
        self.assertEqual(result_two['size'], self.host_fs_second.size)
        self.assertEqual(result_two['name'], self.host_fs_second.name)
        self.assertEqual(result_two['logical_volume'],
                         self.host_fs_second.logical_volume)
        self.assertEqual(result_two['ihost_uuid'], self.host.uuid)
        self.assertEqual(result_two['uuid'], self.host_fs_second.uuid)

        # Response object 3
        self.assertEqual(result_three['size'], self.host_fs_third.size)
        self.assertEqual(result_three['name'], self.host_fs_third.name)
        self.assertEqual(result_three['logical_volume'],
                         self.host_fs_third.logical_volume)
        self.assertEqual(result_three['ihost_uuid'], self.host.uuid)
        self.assertEqual(result_three['uuid'], self.host_fs_third.uuid)


class ApiHostFSDeleteTestSuiteMixin(ApiHostFSTestCaseMixin):
    """ Host FileSystem delete operations
    """
    def setUp(self):
        super(ApiHostFSDeleteTestSuiteMixin, self).setUp()

    # Test that a valid DELETE operation is blocked by the API;
    # the API should reject it with 403 FORBIDDEN
    def test_delete_not_allowed(self):
        uuid = self.host_fs_third.uuid
        response = self.delete(self.get_single_fs_url(uuid),
                               headers=self.API_HEADERS,
                               expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])


class ApiHostFSPostTestSuiteMixin(ApiHostFSTestCaseMixin):
    """ Host FileSystem post operations
    """
    def setUp(self):
        super(ApiHostFSPostTestSuiteMixin, self).setUp()

    # Test that a valid POST operation is blocked by the API;
    # the API should reject it with 403 FORBIDDEN
    def test_post_not_allowed(self):
        response = self.post_json('/host_fs',
                                  {'name': 'kubelet',
                                   'size': 10,
                                   'logical_volume': 'kubelet-lv'},
                                  headers=self.API_HEADERS,
                                  expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])

@@ -470,6 +470,7 @@ class InterfaceTestCase(base.FunctionalTest, dbbase.BaseHostTestCase):
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        return response

    def _post_and_check(self, ndict, expect_errors=False, error_message=None):
        response = self.post_json('%s' % self._get_path(), ndict,

@@ -627,14 +628,11 @@ class InterfaceControllerVlanOverEthernet(InterfaceTestCase):
class InterfaceWorkerEthernet(InterfaceTestCase):

    def _setup_configuration(self):
        # Setup a sample configuration where the personality is set to a
        # controller and all interfaces are ethernet interfaces.
        self._create_host(constants.CONTROLLER, admin=constants.ADMIN_UNLOCKED)
        self._create_datanetworks()
        self._create_ethernet('oam', constants.NETWORK_TYPE_OAM)
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)
        self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST)

        # Setup a sample configuration where the personality is set to a
        # worker and all interfaces are ethernet interfaces.
        self._create_host(constants.WORKER, constants.WORKER,
                          admin=constants.ADMIN_LOCKED)
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT,

@@ -682,19 +680,8 @@ class InterfaceWorkerEthernet(InterfaceTestCase):
class InterfaceWorkerVlanOverEthernet(InterfaceTestCase):

    def _setup_configuration(self):
        # Setup a sample configuration where the personality is set to a
        # controller and all interfaces are vlan interfaces over ethernet
        # interfaces.
        self._create_host(constants.CONTROLLER)
        self._create_datanetworks()
        port, iface = self._create_ethernet(
            'pxeboot', constants.NETWORK_TYPE_PXEBOOT)
        self._create_vlan('oam', constants.NETWORK_TYPE_OAM,
                          constants.INTERFACE_CLASS_PLATFORM, 1, iface)
        self._create_vlan('mgmt', constants.NETWORK_TYPE_MGMT,
                          constants.INTERFACE_CLASS_PLATFORM, 2, iface)
        self._create_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST,
                          constants.INTERFACE_CLASS_PLATFORM, 3, iface)

        # Setup a sample configuration where the personality is set to a
        # worker and all interfaces are vlan interfaces over ethernet

@@ -728,13 +715,8 @@ class InterfaceWorkerVlanOverEthernet(InterfaceTestCase):
class InterfaceWorkerBond(InterfaceTestCase):

    def _setup_configuration(self):
        # Setup a sample configuration where all platform interfaces are
        # aggregated ethernet interfaces.
        self._create_host(constants.CONTROLLER, admin=constants.ADMIN_UNLOCKED)
        self._create_datanetworks()
        self._create_bond('oam', constants.NETWORK_TYPE_OAM)
        self._create_bond('mgmt', constants.NETWORK_TYPE_MGMT)
        self._create_bond('cluster', constants.NETWORK_TYPE_CLUSTER_HOST)

        # Setup a sample configuration where the personality is set to a
        # worker and all interfaces are aggregated ethernet interfaces.

@@ -766,14 +748,6 @@ class InterfaceWorkerVlanOverBond(InterfaceTestCase):
    def _setup_configuration(self):
        self._create_host(constants.CONTROLLER)
        self._create_datanetworks()
        bond = self._create_bond('pxeboot', constants.NETWORK_TYPE_PXEBOOT,
                                 constants.INTERFACE_CLASS_PLATFORM)
        self._create_vlan('oam', constants.NETWORK_TYPE_OAM,
                          constants.INTERFACE_CLASS_PLATFORM, 1, bond)
        self._create_vlan('mgmt', constants.NETWORK_TYPE_MGMT,
                          constants.INTERFACE_CLASS_PLATFORM, 2, bond)
        self._create_vlan('cluster', constants.NETWORK_TYPE_CLUSTER_HOST,
                          constants.INTERFACE_CLASS_PLATFORM, 3, bond)

        # Setup a sample configuration where the personality is set to a
        # worker and all interfaces are vlan interfaces over aggregated

@@ -817,11 +791,6 @@ class InterfaceWorkerVlanOverDataEthernet(InterfaceTestCase):
    def _setup_configuration(self):
        self._create_host(constants.CONTROLLER)
        self._create_datanetworks()
        bond = self._create_bond('pxeboot', constants.NETWORK_TYPE_PXEBOOT)
        self._create_vlan('oam', constants.NETWORK_TYPE_OAM,
                          constants.INTERFACE_CLASS_PLATFORM, 1, bond)
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)
        self._create_ethernet('cluster', constants.NETWORK_TYPE_CLUSTER_HOST)

        # Setup a sample configuration where the personality is set to a
        # worker and all interfaces are vlan interfaces over data ethernet

@@ -1188,15 +1157,16 @@ class TestList(InterfaceTestCase):
    def setUp(self):
        super(TestList, self).setUp()
        self._create_host(constants.CONTROLLER)
        self._create_host(constants.WORKER, admin=constants.ADMIN_LOCKED)

    def test_empty_interface(self):
        data = self.get_json('/ihosts/%s/iinterfaces' % self.hosts[0].uuid)
        data = self.get_json('/ihosts/%s/iinterfaces' % self.worker.uuid)
        self.assertEqual([], data['iinterfaces'])

    def test_one(self):
        ndict = self._post_get_test_interface(ifname='eth0',
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            forihostid=self.hosts[0].id, ihost_uuid=self.hosts[0].uuid)
            forihostid=self.worker.id, ihost_uuid=self.worker.uuid)
        data = self.post_json('%s' % self._get_path(), ndict)

        # Verify that the interface was created with the expected attributes

@@ -1243,7 +1213,7 @@ class TestPatchMixin(object):
        self._create_datanetworks()

    def test_modify_ifname(self):
        interface = dbutils.create_test_interface(forihostid='1')
        interface = dbutils.create_test_interface(forihostid=self.worker.id)
        response = self.patch_dict_json(
            '%s' % self._get_path(interface.uuid),
            ifname='new_name')

@@ -1252,7 +1222,7 @@ class TestPatchMixin(object):
        self.assertEqual('new_name', response.json['ifname'])

    def test_modify_mtu(self):
        interface = dbutils.create_test_interface(forihostid='1')
        interface = dbutils.create_test_interface(forihostid=self.worker.id)
        response = self.patch_dict_json(
            '%s' % self._get_path(interface.uuid),
            imtu=1600)

@@ -1352,10 +1322,10 @@ class TestPatchMixin(object):
        self.assertTrue(response.json['error_message'])

    def _create_sriov_vf_driver_valid(self, vf_driver, expect_errors=False):
        interface = dbutils.create_test_interface(forihostid='1',
        interface = dbutils.create_test_interface(forihostid=self.worker.id,
                                                  datanetworks='group0-data0')
        dbutils.create_test_ethernet_port(
            id=1, name='eth1', host_id=1, interface_id=interface.id,
            id=1, name='eth1', host_id=self.worker.id, interface_id=interface.id,
            pciaddr='0000:00:00.11', dev_id=0, sriov_totalvfs=1, sriov_numvfs=1,
            driver='i40e',
            sriov_vf_driver='i40evf')

@@ -1374,16 +1344,16 @@ class TestPatchMixin(object):
        self.assertEqual(vf_driver, response.json['sriov_vf_driver'])

    def test_create_sriov_vf_driver_netdevice_valid(self):
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, host=self.worker)
        self._create_sriov_vf_driver_valid(
            constants.SRIOV_DRIVER_TYPE_NETDEVICE)

    def test_create_sriov_vf_driver_vfio_valid(self):
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, host=self.worker)
        self._create_sriov_vf_driver_valid(constants.SRIOV_DRIVER_TYPE_VFIO)

    def test_create_sriov_vf_driver_invalid(self):
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT, host=self.worker)
        self._create_sriov_vf_driver_valid('bad_driver', expect_errors=True)

    def test_create_sriov_no_mgmt(self):

@@ -1454,7 +1424,8 @@ class TestPostMixin(object):
    def test_address_mode_pool_valid(self):
        port, interface = self._create_ethernet(
            'mgmt', constants.NETWORK_TYPE_MGMT,
            ifclass=constants.INTERFACE_CLASS_PLATFORM)
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            host=self.worker)
        network = self._find_network_by_type(constants.NETWORK_TYPE_MGMT)
        pool = self._find_address_pool_by_uuid(network['pool_uuid'])
        if pool.family == constants.IPV4_FAMILY:

@@ -1475,7 +1446,8 @@ class TestPostMixin(object):
    def test_address_mode_static_valid(self):
        port, interface = self._create_ethernet(
            'mgmt', constants.NETWORK_TYPE_MGMT,
            ifclass=constants.INTERFACE_CLASS_PLATFORM)
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            host=self.worker)
        network = self._find_network_by_type(constants.NETWORK_TYPE_MGMT)
        pool = self._find_address_pool_by_uuid(network['pool_uuid'])
        if pool.family == constants.IPV4_FAMILY:

@@ -1564,7 +1536,8 @@ class TestPostMixin(object):
    def test_address_pool_family_mismatch_invalid(self):
        port, interface = self._create_ethernet(
            'mgmt', constants.NETWORK_TYPE_MGMT,
            ifclass=constants.INTERFACE_CLASS_PLATFORM)
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            host=self.worker)
        network = self._find_network_by_type(constants.NETWORK_TYPE_MGMT)
        pool = self._find_address_pool_by_uuid(network['pool_uuid'])
        if pool.family == constants.IPV4_FAMILY:

@@ -1662,17 +1635,19 @@ class TestPostMixin(object):

    def test_aemode_invalid_platform(self):
        ndict = self._post_get_test_interface(
            ihost_uuid=self.controller.uuid,
            ihost_uuid=self.worker.uuid,
            ifname='name',
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            iftype=constants.INTERFACE_TYPE_AE,
            aemode='bad_aemode',
            txhashpolicy='layer2')
        self._post_and_check_failure(ndict)
        response = self._post_and_check_failure(ndict)
        self.assertIn("Invalid aggregated ethernet mode 'bad_aemode'",
                      response.json['error_message'])

    def test_setting_mgmt_mtu_allowed(self):
        ndict = self._post_get_test_interface(
            ihost_uuid=self.controller.uuid,
            ihost_uuid=self.worker.uuid,
            ifname='mgmt0',
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            iftype=constants.INTERFACE_TYPE_ETHERNET,

@@ -1681,7 +1656,7 @@ class TestPostMixin(object):

    def test_setting_cluster_host_mtu_allowed(self):
        ndict = self._post_get_test_interface(
            ihost_uuid=self.controller.uuid,
            ihost_uuid=self.worker.uuid,
            ifname='cluster0',
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            iftype=constants.INTERFACE_TYPE_ETHERNET,

@@ -1826,9 +1801,11 @@ class TestPostMixin(object):

    # Expected message: Name must be unique
    def test_create_invalid_ae_name(self):
        self._create_ethernet('enp0s9', constants.NETWORK_TYPE_NONE)
        self._create_ethernet('enp0s9', constants.NETWORK_TYPE_NONE,
                              host=self.worker)
        self._create_bond('enp0s9', constants.NETWORK_TYPE_MGMT,
                          constants.INTERFACE_CLASS_PLATFORM,
                          host=self.worker,
                          expect_errors=True)

    # Expected message:

@@ -2131,6 +2108,27 @@ class TestAIOPatch(InterfaceTestCase):
                          admin=constants.ADMIN_LOCKED)
        self._create_datanetworks()

    def _setup_sriov_interface_w_numvfs(self, numvfs=5):
        # create sriov interface
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)
        interface = dbutils.create_test_interface(forihostid='1')
        dbutils.create_test_ethernet_port(
            id=1, name='eth1', host_id=1, interface_id=interface.id,
            pciaddr='0000:00:00.11', dev_id=0, sriov_totalvfs=5, sriov_numvfs=1,
            driver='i40e',
            sriov_vf_driver='i40evf')

        # patch to set numvfs
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            ifclass=constants.INTERFACE_CLASS_PCI_SRIOV,
            sriov_numvfs=numvfs,
            expect_errors=False)
        self.assertEqual(http_client.OK, response.status_int)
        self.assertEqual(response.json['sriov_numvfs'], numvfs)

        return interface

    # Expected error: Value for number of SR-IOV VFs must be > 0.
    def test_invalid_sriov_numvfs(self):
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)

@@ -2145,6 +2143,100 @@ class TestAIOPatch(InterfaceTestCase):
        self.assertIn('Value for number of SR-IOV VFs must be > 0.',
                      response.json['error_message'])

    # Expected error: Number of SR-IOV VFs is specified but
    # interface class is not pci-sriov.
    def test_invalid_numvfs_data_class(self):
        # class data -> class data but with numvfs
        interface = dbutils.create_test_interface(
            forihostid='1',
            ifclass=constants.INTERFACE_CLASS_DATA)

        # case 1: non-sriov class has numvfs
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            ifclass=constants.INTERFACE_CLASS_DATA,
            sriov_numvfs=1,
            expect_errors=True)

        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIn('Number of SR-IOV VFs is specified but interface '
                      'class is not pci-sriov.',
                      response.json['error_message'])

    def test_invalid_vf_driver_data_class(self):
        # class data -> class data but with sriov_vf_driver
        interface = dbutils.create_test_interface(
            forihostid='1',
            ifclass=constants.INTERFACE_CLASS_DATA)

        # case 2: non-sriov class has vf_driver
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            ifclass=constants.INTERFACE_CLASS_DATA,
            sriov_vf_driver=constants.SRIOV_DRIVER_TYPE_NETDEVICE,
            expect_errors=True)

        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIn('SR-IOV VF driver is specified but interface '
                      'class is not pci-sriov.',
                      response.json['error_message'])

    def test_invalid_numvfs_sriov_to_data(self):
        interface = self._setup_sriov_interface_w_numvfs()
        # patch to change interface class to data with numvfs, and verify bad numvfs
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            ifclass=constants.INTERFACE_CLASS_DATA,
            sriov_numvfs=5,
            expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertIn('Number of SR-IOV VFs is specified but interface class is not pci-sriov',
                      response.json['error_message'])

    def test_invalid_vfdriver_sriov_to_data(self):
        interface = self._setup_sriov_interface_w_numvfs()
        # patch to change interface class to data with sriov_vf_driver,
        # and verify bad sriov_vf_driver
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            ifclass=constants.INTERFACE_CLASS_DATA,
            sriov_vf_driver=constants.SRIOV_DRIVER_TYPE_NETDEVICE,
            expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertIn('SR-IOV VF driver is specified but interface class is not pci-sriov',
                      response.json['error_message'])

    def test_clear_numvfs_when_no_longer_sriov_class(self):
        interface = self._setup_sriov_interface_w_numvfs()
        # patch to change interface class to data, and verify numvfs is 0
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            ifclass=constants.INTERFACE_CLASS_DATA,
            expect_errors=False)
        self.assertEqual(http_client.OK, response.status_int)
        self.assertEqual(response.json["sriov_numvfs"], 0)

    def test_clear_vfdriver_when_no_longer_sriov_class(self):
        interface = self._setup_sriov_interface_w_numvfs()

        # patch to change interface vf driver to netdevice
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            sriov_vf_driver=constants.SRIOV_DRIVER_TYPE_NETDEVICE,
            expect_errors=False)
        self.assertEqual(response.json["sriov_vf_driver"],
                         constants.SRIOV_DRIVER_TYPE_NETDEVICE)

        # patch to change interface class to data, and verify vf_driver is cleared
        response = self.patch_dict_json(
            '%s' % self._get_path(interface['uuid']),
            ifclass=constants.INTERFACE_CLASS_DATA,
            expect_errors=False)
        self.assertEqual(http_client.OK, response.status_int)
        self.assertEqual(response.json["sriov_vf_driver"], None)

    # Expected error: SR-IOV can't be configured on this interface
    def test_invalid_sriov_totalvfs_zero(self):
        self._create_ethernet('mgmt', constants.NETWORK_TYPE_MGMT)

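The TestAIOPatch cases above fix the interface-class semantics: sriov_numvfs and sriov_vf_driver are only accepted on a pci-sriov interface, and both are reset when the class is changed away from pci-sriov. A minimal sketch of that rule (a hypothetical helper for illustration, not the sysinv validation code):

def check_sriov_fields(ifclass, sriov_numvfs=None, sriov_vf_driver=None):
    """Raise ValueError when SR-IOV fields accompany a non-sriov class;
    otherwise return the (possibly cleared) field values."""
    if ifclass != 'pci-sriov':
        if sriov_numvfs:
            raise ValueError('Number of SR-IOV VFs is specified but '
                             'interface class is not pci-sriov.')
        if sriov_vf_driver:
            raise ValueError('SR-IOV VF driver is specified but '
                             'interface class is not pci-sriov.')
        # leaving the pci-sriov class resets both fields
        return 0, None
    return sriov_numvfs, sriov_vf_driver
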
@@ -134,6 +134,22 @@ class TestKubeUpgrade(base.FunctionalTest, dbbase.BaseHostTestCase):
        self.mocked_kube_get_version_states.start()
        self.addCleanup(self.mocked_kube_get_version_states.stop)

        # Mock utility function
        self.kube_min_version_result, self.kube_max_version_result = 'v1.42.1', 'v1.43.1'

        def mock_get_app_supported_kube_version(app_name, app_version):
            return self.kube_min_version_result, self.kube_max_version_result
        self.mocked_kube_min_version = mock.patch(
            'sysinv.common.utils.get_app_supported_kube_version',
            mock_get_app_supported_kube_version)
        self.mocked_kube_max_version = mock.patch(
            'sysinv.common.utils.get_app_supported_kube_version',
            mock_get_app_supported_kube_version)
        self.mocked_kube_min_version.start()
        self.mocked_kube_max_version.start()
        self.addCleanup(self.mocked_kube_min_version.stop)
        self.addCleanup(self.mocked_kube_max_version.stop)

    def _create_controller_0(self, subfunction=None, numa_nodes=1, **kw):
        return self._create_test_host(
            personality=constants.CONTROLLER,

@@ -273,6 +289,30 @@ class TestPostKubeUpgrade(TestKubeUpgrade, dbbase.ControllerHostTestCase):
        self.assertIn("version v1.43.1 is not active",
                      result.json['error_message'])

    def test_create_installed_app_not_compatible(self):
        # Test creation of upgrade when the installed application isn't
        # compatible with the new kubernetes version

        # Create application
        dbutils.create_test_app(
            name='stx-openstack',
            app_version='1.0-19',
            manifest_name='openstack-armada-manifest',
            manifest_file='stx-openstack.yaml',
            status='applied',
            active=True)

        create_dict = dbutils.post_get_test_kube_upgrade(to_version='v1.43.2')
        result = self.post_json('/kube_upgrade', create_dict,
                                headers={'User-Agent': 'sysinv-test'},
                                expect_errors=True)

        # Verify the failure
        self.assertEqual(result.content_type, 'application/json')
        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
        self.assertIn("incompatible with the new Kubernetes version v1.43.2",
                      result.json['error_message'])

    def test_create_system_unhealthy(self):
        # Test creation of upgrade when system health check fails
        self.fake_conductor_api.get_system_health_return = (

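Both patchers in the setUp above wrap the same target, sysinv.common.utils.get_app_supported_kube_version, so a single patcher would behave identically. A compact equivalent, assuming the same test-class attributes (a sketch of standard mock usage, not a required change):

import mock

def _setup_kube_version_mock(self):
    # One patcher suffices: min and max both come from the one helper.
    patcher = mock.patch(
        'sysinv.common.utils.get_app_supported_kube_version',
        lambda app_name, app_version: (self.kube_min_version_result,
                                       self.kube_max_version_result))
    patcher.start()
    self.addCleanup(patcher.stop)
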
@@ -116,6 +116,10 @@ class NetworkTestCase(base.FunctionalTest, dbbase.BaseHostTestCase):
            hostnames, self.cluster_host_subnet,
            constants.NETWORK_TYPE_CLUSTER_HOST)

        self._create_test_addresses(
            hostnames, self.storage_subnet,
            constants.NETWORK_TYPE_STORAGE)


class TestPostMixin(NetworkTestCase):


@@ -221,6 +225,12 @@ class TestPostMixin(NetworkTestCase):
            constants.NETWORK_TYPE_CLUSTER_SERVICE,
            self.cluster_service_subnet)

    def test_create_success_storage(self):
        self._test_create_network_success(
            'storage',
            constants.NETWORK_TYPE_STORAGE,
            self.storage_subnet)

    def test_create_fail_duplicate_pxeboot(self):
        self._test_create_network_fail_duplicate(
            'pxeboot',

@@ -257,6 +267,12 @@ class TestPostMixin(NetworkTestCase):
            constants.NETWORK_TYPE_CLUSTER_SERVICE,
            self.cluster_service_subnet)

    def test_create_fail_duplicate_storage(self):
        self._test_create_network_fail_duplicate(
            'storage',
            constants.NETWORK_TYPE_STORAGE,
            self.storage_subnet)

    def test_create_with_invalid_type(self):
        # Test creation with an invalid type
        address_pool_id = self._create_test_address_pool(

@@ -396,6 +412,14 @@ class TestDelete(NetworkTestCase):
            constants.NETWORK_TYPE_CLUSTER_SERVICE
        )

    def test_delete_storage_subnet(self):
        self._test_delete_allowed(constants.NETWORK_TYPE_STORAGE)

    def test_delete_storage_subnet_after_initial_config(self):
        self._test_delete_after_initial_config_not_allowed(
            constants.NETWORK_TYPE_STORAGE
        )

    def test_delete_data(self):
        self._test_delete_allowed(constants.NETWORK_TYPE_DATA)

@@ -47,7 +47,7 @@ class PTPTestCase(base.FunctionalTest):
        self.ptp = self.dbapi.ptp_get_one()
        self.ptp_uuid = self.ptp.uuid

    def _get_path(self, ptp_id):
    def _get_path(self, ptp_id=None):
        if ptp_id:
            path = '/ptp/' + ptp_id
        else:

@@ -124,3 +124,15 @@ class PTPModifyTestCase(PTPTestCase):

        dbutils.create_test_interface(**interface)
        self.modify_ptp_failure(self.transport_udp, "Invalid system configuration for UDP based PTP transport")


class PTPApplyTestCase(PTPTestCase):
    def setUp(self):
        super(PTPApplyTestCase, self).setUp()

    def test_apply_ptp(self):
        # This is basically a null operation for the API but we should test that the function exists
        apply_path = self._get_path() + "/apply"
        # The apply takes no parameters
        response = self.post_json(apply_path, {})
        self.assertEqual(http_client.NO_CONTENT, response.status_int)

@@ -0,0 +1,367 @@
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Tests for the API / service_parameter / methods.
"""

from six.moves import http_client

from oslo_utils import uuidutils
from sysinv.common import constants

from sysinv.tests.api import base
from sysinv.tests.db import base as dbbase
from sysinv.tests.db import utils as dbutils


class ApiServiceParameterTestCaseMixin(object):
    # API_HEADERS are a generic header passed to most API calls
    API_HEADERS = {'User-Agent': 'sysinv-test',
                   'Content-Type': 'application/json',
                   'Accept': 'application/json'}

    # API_PREFIX is the prefix for the URL
    API_PREFIX = '/service_parameter'

    # RESULT_KEY is the python table key for the list of results
    RESULT_KEY = 'parameters'

    # expected_api_fields are attributes that should be populated by
    # an API query
    expected_api_fields = ['uuid',
                           'service',
                           'section',
                           'name',
                           'value',
                           'resource',
                           'personality'
                           ]

    required_post_fields = [
        'service',
        'section',
        'parameters',
        'resource',
        'personality'
    ]

    # hidden_api_fields are attributes that should not be populated by
    # an API query
    hidden_api_fields = []

    service_parameter_data = [
        {
            'service': constants.SERVICE_TYPE_HTTP,
            'section': constants.SERVICE_PARAM_SECTION_HTTP_CONFIG,
            'name': constants.SERVICE_PARAM_HTTP_PORT_HTTP,
            'value': str(constants.SERVICE_PARAM_HTTP_PORT_HTTP_DEFAULT)
        },
        {
            'service': constants.SERVICE_TYPE_HTTP,
            'section': constants.SERVICE_PARAM_SECTION_HTTP_CONFIG,
            'name': constants.SERVICE_PARAM_HTTP_PORT_HTTPS,
            'value': str(constants.SERVICE_PARAM_HTTP_PORT_HTTPS_DEFAULT)
        },
        {
            'service': constants.SERVICE_TYPE_KUBERNETES,
            'section': constants.SERVICE_PARAM_SECTION_KUBERNETES_CERTIFICATES,
            'name': constants.SERVICE_PARAM_NAME_KUBERNETES_API_SAN_LIST,
            'value': 'localurl'
        },
        {
            'service': constants.SERVICE_TYPE_KUBERNETES,
            'section': constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
            'name': constants.SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM,
            'value': 'wad'
        },
        {
            'service': constants.SERVICE_TYPE_KUBERNETES,
            'section': constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
            'name': constants.SERVICE_PARAM_NAME_OIDC_ISSUER_URL,
            'value': 'https://10.10.10.3:30556/dex'
        },
        {
            'service': constants.SERVICE_TYPE_KUBERNETES,
            'section': constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
            'name': constants.SERVICE_PARAM_NAME_OIDC_CLIENT_ID,
            'value': 'wad'
        },
        {
            'service': constants.SERVICE_TYPE_KUBERNETES,
            'section': constants.SERVICE_PARAM_SECTION_KUBERNETES_APISERVER,
            'name': constants.SERVICE_PARAM_NAME_OIDC_GROUPS_CLAIM,
            'value': 'wad'
        }
    ]

    service_parameter_wildcard = {
        'service': constants.SERVICE_TYPE_PTP,
        'section': constants.SERVICE_PARAM_SECTION_PTP_GLOBAL,
        'name': 'network_transport',
        'value': 'L2'
    }

    def setUp(self):
        super(ApiServiceParameterTestCaseMixin, self).setUp()

    def get_single_url(self, uuid):
        return '%s/%s' % (self.API_PREFIX, uuid)

    # These methods have generic names and are overridden here
    # Future activity: Redo the subclasses to use mixins
    def assert_fields(self, api_object):
        # check the uuid is a uuid
        assert(uuidutils.is_uuid_like(api_object['uuid']))

        # Verify that expected attributes are returned
        for field in self.expected_api_fields:
            self.assertIn(field, api_object)

        # Verify that hidden attributes are not returned
        for field in self.hidden_api_fields:
            self.assertNotIn(field, api_object)

    def _create_db_object(self, parameter_data=None):
        if not parameter_data:
            parameter_data = self.service_parameter_data[0]
        return dbutils.create_test_service_parameter(**parameter_data)

    def _create_db_objects(self, data_set=None):
        if not data_set:
            data_set = self.service_parameter_data
        data = []
        for parameter_data in data_set:
            data.append(self._create_db_object(parameter_data))

        return data

    def get_one(self, uuid, expect_errors=False, error_message=None):
        response = self.get_json(self.get_single_url(uuid), headers=self.API_HEADERS)
        self.validate_response(response, expect_errors, error_message, json_response=True)
        return response

    def get_list(self):
        response = self.get_json(self.API_PREFIX, headers=self.API_HEADERS)
        return response[self.RESULT_KEY]

    def patch(self, uuid, data, expect_errors=False, error_message=None):
        response = self.patch_dict(self.get_single_url(uuid),
                                   data=data,
                                   expect_errors=expect_errors,
                                   headers=self.API_HEADERS)
        self.validate_response(response, expect_errors, error_message)
        if expect_errors:
            return response
        else:
            return response.json

    def post(self, data, expect_errors=False, error_message=None):
        formatted_data = self.format_data(data)
        response = self.post_json(self.API_PREFIX,
                                  params=formatted_data,
                                  expect_errors=expect_errors,
                                  headers=self.API_HEADERS)

        self.validate_response(response, expect_errors, error_message)
        if expect_errors:
            return response
        else:
            return response.json[self.RESULT_KEY][0]

    def apply(self, service, expect_errors=False):
        data = {}
        data['service'] = service
        response = self.post_json(self.API_PREFIX + "/apply",
                                  params=data,
                                  expect_errors=expect_errors,
                                  headers=self.API_HEADERS)
        return response

    def validate_response(self, response, expect_errors, error_message, json_response=False):
        if expect_errors:
            self.assertEqual(http_client.BAD_REQUEST, response.status_int)
            self.assertEqual('application/json', response.content_type)
            if error_message:
                self.assertIn(error_message, response.json['error_message'])
        elif not json_response:
            self.assertEqual(http_client.OK, response.status_int)

    def validate_data(self, input_data, response_data):
        self.assert_fields(response_data)
        for key, value in input_data.items():
            if key in self.expected_api_fields:
                self.assertEqual(value, response_data[key])

    def format_data(self, data):
        formatted_data = dict(data)
        formatted_data.update({'parameters': {data['name']: data['value']}})
        for field in self.required_post_fields:
            if field not in formatted_data:
                formatted_data[field] = None

        return formatted_data


class ApiServiceParameterPostTestSuiteMixin(ApiServiceParameterTestCaseMixin):

    def setUp(self):
        super(ApiServiceParameterPostTestSuiteMixin, self).setUp()

    def test_create_success(self):
        # Test creation of object
        post_object = self.service_parameter_data[0]
        response = self.post(post_object)
        self.validate_data(post_object, response)

    def test_create_invalid_service(self):
        # Test creation with an invalid service name
        post_object = dict(self.service_parameter_data[0])
        post_object.update({'service': 'not_valid'})
        self.post(post_object, expect_errors=True, error_message="Invalid service name")

    def test_create_wildcard_success(self):
        # Test creation of a section that allows wildcard parameter names
        post_object = self.service_parameter_wildcard
        response = self.post(post_object)
        self.validate_data(post_object, response)

    def test_apply_kubernetes_apiserver_oidc_parameters_semantic(self):
        # applying kubernetes service parameters with no OIDC parameters
        # is a valid configuration
        response = self.apply('kubernetes')
        self.assertEqual(http_client.NO_CONTENT, response.status_int)

        # set SERVICE_PARAM_NAME_OIDC_USERNAME_CLAIM alone; this is an
        # invalid config. Valid configs are: (none),
        # (oidc_issuer_url, oidc_client_id, oidc_username_claim),
        # and (the previous 3 plus oidc_groups_claim)
        post_object = self.service_parameter_data[3]
        response = self.post(post_object)
        self.validate_data(post_object, response)
        response = self.apply('kubernetes', expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)

        # the other 2 valid configs
        post_object = self.service_parameter_data[4]
        response = self.post(post_object)
        self.validate_data(post_object, response)
        post_object = self.service_parameter_data[5]
        response = self.post(post_object)
        self.validate_data(post_object, response)
        response = self.apply('kubernetes')
        self.assertEqual(http_client.NO_CONTENT, response.status_int)

        post_object = self.service_parameter_data[6]
        response = self.post(post_object)
        self.validate_data(post_object, response)
        response = self.apply('kubernetes')
        self.assertEqual(http_client.NO_CONTENT, response.status_int)


class ApiServiceParameterDeleteTestSuiteMixin(ApiServiceParameterTestCaseMixin):
    """ Tests deletion.
        Typically delete APIs return NO CONTENT.
        python2 and python3 libraries may return different
        content_type (None, or empty json) when NO_CONTENT returned.
    """

    def setUp(self):
        super(ApiServiceParameterDeleteTestSuiteMixin, self).setUp()
        self.delete_object = self._create_db_object()

    # Delete an object and ensure it is removed
    def test_delete(self):
        # Delete the API object
        uuid = self.delete_object.uuid
        response = self.delete(self.get_single_url(uuid),
                               headers=self.API_HEADERS)

        self.assertEqual(response.status_code, http_client.NO_CONTENT)

        # Verify the object is no longer returned
        results = self.get_list()
        returned_uuids = (result.uuid for result in results)
        self.assertNotIn(uuid, returned_uuids)


class ApiServiceParameterListTestSuiteMixin(ApiServiceParameterTestCaseMixin):
    """ list operations """

    def test_empty_list(self):
        results = self.get_list()
        self.assertEqual([], results)

    def test_single_entry(self):
        # create a single object
        single_object = self._create_db_object()
        uuid = single_object.uuid
        response = self.get_json(self.get_single_url(uuid))
        self.validate_data(single_object, response)

    def test_many_entries_in_list(self):
        db_obj_list = self._create_db_objects()

        response = self.get_list()
        # Verify that the input data is found in the result
        response_map = {}
        for api_object in response:
            response_map[api_object['uuid']] = api_object
        for db_object in db_obj_list:
            self.validate_data(db_object, response_map[db_object.uuid])


class ApiServiceParameterPatchTestSuiteMixin(ApiServiceParameterTestCaseMixin):

    def setUp(self):
        super(ApiServiceParameterPatchTestSuiteMixin, self).setUp()
        self.patch_object = self._create_db_object()

    def test_patch_valid(self):
        # Update value of patchable field
        new_data = {'value': '8077'}
        response = self.patch(self.patch_object.uuid, new_data)
        # Verify that the attribute was updated
        self.patch_object.update(new_data)
        self.validate_data(self.patch_object, response)

    def test_patch_invalid_value(self):
        # Pass a value that fails a semantic check when patched by the API
        new_data = {'value': 'a_string'}
        self.patch(self.patch_object.uuid, new_data, expect_errors=True,
                   error_message="must be an integer value")

    def test_patch_wildcard_success(self):
        # Test modification of a section that allows wildcard parameter names
        wildcard_object = self._create_db_object(self.service_parameter_wildcard)
        new_data = {'value': 'UDPv4'}
        response = self.patch(wildcard_object.uuid, new_data)
        wildcard_object.update(new_data)
        self.validate_data(wildcard_object, response)


class PlatformIPv4ControllerApiServiceParameterDeleteTestCase(ApiServiceParameterDeleteTestSuiteMixin,
                                                              base.FunctionalTest,
                                                              dbbase.ProvisionedControllerHostTestCase):
    pass


class PlatformIPv4ControllerApiServiceParameterListTestCase(ApiServiceParameterListTestSuiteMixin,
                                                            base.FunctionalTest,
                                                            dbbase.ProvisionedControllerHostTestCase):
    pass


class PlatformIPv4ControllerApiServiceParameterPostTestCase(ApiServiceParameterPostTestSuiteMixin,
                                                            base.FunctionalTest,
                                                            dbbase.ProvisionedControllerHostTestCase):
    pass


class PlatformIPv4ControllerApiServiceParameterPatchTestCase(ApiServiceParameterPatchTestSuiteMixin,
                                                             base.FunctionalTest,
                                                             dbbase.ProvisionedControllerHostTestCase):
    pass

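The OIDC apply test above encodes the rule in its comments: the only valid apiserver OIDC configurations are no parameters at all, the required triple, or the triple plus the groups claim. A minimal sketch of that combination check (illustrative names, not the sysinv semantic-check code):

REQUIRED_OIDC_PARAMS = frozenset(
    ['oidc_issuer_url', 'oidc_client_id', 'oidc_username_claim'])

def oidc_params_valid(param_names):
    """Return True for the three allowed OIDC parameter combinations."""
    names = frozenset(param_names)
    return names in (frozenset(),
                     REQUIRED_OIDC_PARAMS,
                     REQUIRED_OIDC_PARAMS | {'oidc_groups_claim'})

# Posting only the username claim must fail the apply, as in the test:
assert not oidc_params_valid(['oidc_username_claim'])
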
@@ -621,9 +621,12 @@ class StorageTierDependentTCs(base.FunctionalTest):
        self._create_storage_mon('storage-0', storage_0['id'])

        # Mock the fsid call so that we don't have to wait for the timeout
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
        with nested(mock.patch.object(ceph.CephWrapper, 'fsid'),
                    mock.patch.object(ceph_utils, 'fix_crushmap')) as (mock_fsid, mock_fix_crushmap):
            mock_fix_crushmap.return_value = True
            mock_fsid.return_value = (mock.MagicMock(ok=False), None)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()
            self.assertIsNone(self.service._ceph.cluster_ceph_uuid)
            self.assertIsNotNone(self.service._ceph.cluster_db_uuid)

@@ -30,7 +30,6 @@ import copy
import fixtures
import mock
import os
import shutil
import testtools

from oslo_config import cfg

@@ -38,60 +37,47 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import timeutils

from sysinv.common import paths
from sysinv.db import api as dbapi
from sysinv.db import migration
from sysinv.db import migration as db_migration
from sysinv.db.sqlalchemy import migration

import sysinv.helm.utils
from sysinv.objects import base as objects_base
from sysinv.tests import conf_fixture
from sysinv.tests import policy_fixture

sys.modules['fm_core'] = mock.Mock()
sys.modules['rpm'] = mock.Mock()

CONF = cfg.CONF
_DB_CACHE = None


sys.modules['fm_core'] = mock.Mock()
sys.modules['rpm'] = mock.Mock()


class Database(fixtures.Fixture):

    def __init__(self, engine, db_migrate, sql_connection,
                 sqlite_db, sqlite_clean_db):
    def __init__(self, engine, db_migrate, sql_connection):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db

        self.engine = engine
        self.engine.dispose()
        conn = self.engine.connect()
        if sql_connection == "sqlite://":
            if db_migrate.db_version() > db_migrate.INIT_VERSION:
                return
        else:
            testdb = paths.state_path_rel(sqlite_db)
            if os.path.exists(testdb):
                return
        db_migrate.db_sync()
        self.setup_sqlite(db_migrate)

        self.post_migrations()
        if sql_connection == "sqlite://":
            conn = self.engine.connect()
            self._DB = "".join(line for line in conn.connection.iterdump())
            self.engine.dispose()
        else:
            cleandb = paths.state_path_rel(sqlite_clean_db)
            shutil.copyfile(testdb, cleandb)
        self._DB = "".join(line for line in conn.connection.iterdump())
        self.engine.dispose()

    def setup_sqlite(self, db_migrate):
        if db_migrate.db_version() > db_migration.INIT_VERSION:
            return
        db_migrate.db_sync()

    def setUp(self):
        super(Database, self).setUp()

        if self.sql_connection == "sqlite://":
            conn = self.engine.connect()
            conn.connection.executescript(self._DB)
            self.addCleanup(self.engine.dispose)
        else:
            shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
                            paths.state_path_rel(self.sqlite_db))
        conn = self.engine.connect()
        conn.connection.executescript(self._DB)
        self.addCleanup(self.engine.dispose)

    def post_migrations(self):
        """Any additional steps that are needed outside of the migrations."""

@@ -160,15 +146,10 @@ class TestCase(testtools.TestCase):
        logging.register_options(CONF)

        self.useFixture(conf_fixture.ConfFixture(CONF))

        global _DB_CACHE
        if not _DB_CACHE:
            engine = enginefacade.get_legacy_facade().get_engine()
            _DB_CACHE = Database(engine, migration,
                                 sql_connection=CONF.database.connection,
                                 sqlite_db='sysinv.sqlite',
                                 sqlite_clean_db='clean.sqlite')
            self.useFixture(_DB_CACHE)
        # The fixture config is not set up when the DB_CACHE below is being constructed
        self.config(connection="sqlite://",
                    sqlite_synchronous=False,
                    group='database')

        # NOTE(danms): Make sure to reset us back to non-remote objects
        # for each test to avoid interactions. Also, backup the object

@@ -183,6 +164,13 @@ class TestCase(testtools.TestCase):
        self.policy = self.useFixture(policy_fixture.PolicyFixture())
        CONF.set_override('fatal_exception_format_errors', True)

        global _DB_CACHE
        if not _DB_CACHE:
            engine = enginefacade.get_legacy_facade().get_engine()
            _DB_CACHE = Database(engine, migration,
                                 sql_connection=CONF.database.connection)
        self.useFixture(_DB_CACHE)

    def tearDown(self):
        super(TestCase, self).tearDown()
        self.helm_refresh_patcher.stop()

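The refactored Database fixture above migrates an in-memory SQLite database once, snapshots it with iterdump, and replays the snapshot in each test's setUp. The core pattern in isolation (plain sqlite3; the table here is a stand-in for the real migrated schema):

import sqlite3

# Build a database once (stands in for running all migrations).
conn = sqlite3.connect(':memory:')
conn.executescript("CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT);"
                   "INSERT INTO t (name) VALUES ('seed');")

# Snapshot the whole database as a single SQL script.
snapshot = "".join(line for line in conn.iterdump())
conn.close()

# Each "test" starts from a fresh connection and replays the snapshot,
# which is much cheaper than re-running migrations every time.
fresh = sqlite3.connect(':memory:')
fresh.executescript(snapshot)
assert fresh.execute("SELECT name FROM t").fetchone() == ('seed',)
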
@@ -878,3 +878,16 @@ class TestKubeOperator(base.TestCase):

        result = self.kube_operator.kube_get_kubernetes_version()
        assert result is None


class TestKubernetesUtilities(base.TestCase):
    def test_is_kube_version_supported(self):
        self.assertTrue(kube.is_kube_version_supported('v1.42.3', 'v1.42.1', 'v1.43.1'))
        self.assertTrue(kube.is_kube_version_supported('v1.42.3', 'v1.42.3', 'v1.42.3'))
        self.assertTrue(kube.is_kube_version_supported('v1.42.3', 'v1.42.1', None))
        self.assertTrue(kube.is_kube_version_supported('v1.42.3', None, 'v1.43.1'))
        self.assertTrue(kube.is_kube_version_supported('v1.42.3', None, None))
        self.assertFalse(kube.is_kube_version_supported('v1.42.3', 'v1.42.1', 'v1.42.2'))
        self.assertFalse(kube.is_kube_version_supported('v1.42.3', 'v1.42.2', 'v1.42.2'))
        self.assertFalse(kube.is_kube_version_supported('v1.42.3', 'v1.43.1', None))
        self.assertFalse(kube.is_kube_version_supported('v1.42.3', None, 'v1.41.5'))

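The truth table above pins down the semantics: the version must fall inside an inclusive [minimum, maximum] window, and a None bound is open-ended. A minimal reference implementation consistent with those assertions (a sketch, not the sysinv code):

def is_kube_version_supported(kube_version, minimum=None, maximum=None):
    """Return True if kube_version lies in the inclusive range
    [minimum, maximum]; a None bound means unbounded on that side."""
    def as_tuple(version):
        # 'v1.42.3' -> (1, 42, 3) so comparison is numeric, not lexical
        return tuple(int(part) for part in version.lstrip('v').split('.'))

    if minimum is not None and as_tuple(kube_version) < as_tuple(minimum):
        return False
    if maximum is not None and as_tuple(kube_version) > as_tuple(maximum):
        return False
    return True

# Mirrors two rows of the truth table above:
assert is_kube_version_supported('v1.42.3', 'v1.42.1', 'v1.43.1')
assert not is_kube_version_supported('v1.42.3', None, 'v1.41.5')
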
@@ -13,6 +13,7 @@ import mock
from cephclient import wrapper as ceph
from oslo_utils import uuidutils

from sysinv.common import ceph as cceph
from sysinv.common import constants
from sysinv.conductor import manager
from sysinv.conductor import ceph as iceph

@@ -43,6 +44,8 @@ class UpdateCephCluster(base.DbTestCase):

    upgrade_downgrade_kube_components_patcher = mock.patch.object(
        manager.ConductorManager, '_upgrade_downgrade_kube_components')
    fix_crushmap_patcher = mock.patch.object(
        cceph, 'fix_crushmap')

    def setUp(self):
        super(UpdateCephCluster, self).setUp()

@@ -55,10 +58,13 @@ class UpdateCephCluster(base.DbTestCase):
        self.host_index = -1

        self.mock_upgrade_downgrade_kube_components = self.upgrade_downgrade_kube_components_patcher.start()
        self.mock_fix_crushmap = self.fix_crushmap_patcher.start()
        self.mock_fix_crushmap.return_value = True

    def tearDown(self):
        super(UpdateCephCluster, self).tearDown()
        self.upgrade_downgrade_kube_components_patcher.stop()
        self.fix_crushmap_patcher.stop()

    def _create_storage_ihost(self, hostname):
        self.host_index += 1

@@ -81,6 +87,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=False), None)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()
            self.assertIsNone(self.service._ceph.cluster_ceph_uuid)
            self.assertIsNotNone(self.service._ceph.cluster_db_uuid)

@@ -92,6 +99,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()
            self.assertIsNotNone(self.service._ceph.cluster_ceph_uuid)
            self.assertIsNotNone(self.service._ceph.cluster_db_uuid)

@@ -106,6 +114,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=False), None)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()
            self.assertIsNone(self.service._ceph.cluster_ceph_uuid)
            self.assertIsNotNone(self.service._ceph.cluster_db_uuid)

@@ -135,6 +144,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=False), None)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        self.assertIsNone(self.service._ceph.cluster_ceph_uuid)

@@ -164,6 +174,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        clusters = self.dbapi.clusters_get_all(type=constants.CINDER_BACKEND_CEPH)

@@ -188,6 +199,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=False), None)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        self.assertIsNone(self.service._ceph.cluster_ceph_uuid)

@@ -225,6 +237,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        clusters = self.dbapi.clusters_get_all(type=constants.CINDER_BACKEND_CEPH)

@@ -259,6 +272,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        clusters = self.dbapi.clusters_get_all(type=constants.CINDER_BACKEND_CEPH)

@@ -326,6 +340,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        for h in hosts:

@@ -381,6 +396,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        for h in hosts:

@@ -398,6 +414,7 @@ class UpdateCephCluster(base.DbTestCase):
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        storage_0 = self._create_storage_ihost('storage-0')

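The fix_crushmap patcher above is started in setUp and stopped in tearDown. A common alternative (shown here only as a sketch of standard mock usage) registers the stop with addCleanup, which also runs when setUp fails partway:

import mock

# Fragment for illustration; assumes the same cceph module import.
def setUp(self):
    patcher = mock.patch.object(cceph, 'fix_crushmap')
    self.mock_fix_crushmap = patcher.start()
    self.mock_fix_crushmap.return_value = True
    self.addCleanup(patcher.stop)  # runs even if a later setUp step raises
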
@@ -1426,3 +1426,59 @@ class ManagerTestCase(base.DbTestCase):
        updated_port = self.dbapi.ethernet_port_get(port1['uuid'], host_id)

        self.assertEqual(updated_port['node_id'], 3)


class ManagerTestCaseInternal(base.BaseHostTestCase):

    def setUp(self):
        super(ManagerTestCaseInternal, self).setUp()

        # Set up objects for testing
        self.service = manager.ConductorManager('test-host', 'test-topic')
        self.service.dbapi = dbapi.get_instance()

    def test_remove_lease_for_address(self):
        # create test interface
        ihost = self._create_test_host(
            personality=constants.WORKER,
            administrative=constants.ADMIN_UNLOCKED)
        iface = utils.create_test_interface(
            ifname="test0",
            ifclass=constants.INTERFACE_CLASS_PLATFORM,
            forihostid=ihost.id,
            ihost_uuid=ihost.uuid)
        network = self.dbapi.network_get_by_type(constants.NETWORK_TYPE_MGMT)
        utils.create_test_interface_network(
            interface_id=iface.id,
            network_id=network.id)

        # create test address associated with interface
        address_name = cutils.format_address_name(ihost.hostname,
                                                  network.type)
        self.dbapi.address_create({
            'name': address_name,
            'family': self.oam_subnet.version,
            'prefix': self.oam_subnet.prefixlen,
            'address': str(self.oam_subnet[24]),
            'interface_id': iface.id,
            'enable_dad': self.oam_subnet.version == 6
        })

        # stub the system i/o calls
        self.mock_objs = [
            mock.patch.object(
                manager.ConductorManager, '_find_local_interface_name',
                lambda x, y: iface.ifname),
            mock.patch('sysinv.common.utils.get_dhcp_cid',
                       lambda x, y, z: None),
            mock.patch.object(
                manager.ConductorManager, '_dhcp_release',
                lambda a, b, c, d, e: None)
        ]

        for mock_obj in self.mock_objs:
            mock_obj.start()
            self.addCleanup(mock_obj.stop)

        self.service._remove_lease_for_address(ihost.hostname,
                                               constants.NETWORK_TYPE_MGMT)

@@ -41,7 +41,7 @@ class ConfFixture(config_fixture.Config):
        self.conf.set_default('rpc_cast_timeout', 5)
        self.conf.set_default('rpc_response_timeout', 5)
        self.conf.set_default('connection', "sqlite://", group='database')
        self.conf.set_default('sqlite_synchronous', False)
        self.conf.set_default('sqlite_synchronous', False, group='database')
        self.conf.set_default('use_ipv6', True)
        config.parse_args([], default_config_files=[])
        self.addCleanup(self.conf.reset)

@@ -47,6 +47,7 @@ class BaseIPv4Mixin(object):
    cluster_pod_subnet = netaddr.IPNetwork('172.16.0.0/16')
    cluster_service_subnet = netaddr.IPNetwork('10.96.0.0/12')
    multicast_subnet = netaddr.IPNetwork('239.1.1.0/28')
    storage_subnet = netaddr.IPNetwork('10.10.20.0/24')

    nameservers = ['8.8.8.8', '8.8.4.4']

@@ -63,6 +64,7 @@ class BaseIPv6Mixin(object):
    cluster_pod_subnet = netaddr.IPNetwork('fd03::/64')
    cluster_service_subnet = netaddr.IPNetwork('fd04::/112')
    multicast_subnet = netaddr.IPNetwork('ff08::1:1:0/124')
    storage_subnet = netaddr.IPNetwork('fd05::/64')

    nameservers = ['2001:4860:4860::8888', '2001:4860:4860::8844']

@@ -234,6 +236,10 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase):
            constants.NETWORK_TYPE_CLUSTER_SERVICE,
            self.cluster_service_subnet)

        self._create_test_network('storage',
                                  constants.NETWORK_TYPE_STORAGE,
                                  self.storage_subnet)

    def _create_test_addresses(self, hostnames, subnet, network_type,
                               start=1, stop=None):
        ips = itertools.islice(subnet, start, stop)

@@ -276,6 +282,10 @@ class BaseSystemTestCase(BaseIPv4Mixin, DbTestCase):
            hostnames, self.cluster_host_subnet,
            constants.NETWORK_TYPE_CLUSTER_HOST)

        self._create_test_addresses(
            hostnames, self.storage_subnet,
            constants.NETWORK_TYPE_STORAGE)

    def _create_test_oam(self):
        self.oam = dbutils.create_test_oam()

@@ -388,8 +398,9 @@ class BaseHostTestCase(BaseSystemTestCase):
    def _create_test_host_platform_interface(self, host):
        network_types = [constants.NETWORK_TYPE_OAM,
                         constants.NETWORK_TYPE_MGMT,
                         constants.NETWORK_TYPE_CLUSTER_HOST]
        ifnames = ['oam', 'mgmt', 'cluster']
                         constants.NETWORK_TYPE_CLUSTER_HOST,
                         constants.NETWORK_TYPE_STORAGE]
        ifnames = ['oam', 'mgmt', 'cluster', 'storage']
        index = 0
        ifaces = []
        for nt, name in zip(network_types, ifnames):

@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2019 Wind River Systems, Inc.
# Copyright (c) 2013-2020 Wind River Systems, Inc.
#

"""Sysinv test utilities."""

@@ -325,6 +325,31 @@ def create_test_kube_host_upgrade():
    return dbapi.kube_host_upgrade_create(hostid, upgrade)


# Create test controller file system object
def get_test_controller_fs(**kw):
    controller_fs = {
        'id': kw.get('id'),
        'uuid': kw.get('uuid'),
        'name': kw.get('name'),
        'forisystemid': kw.get('forisystemid', None),
        'state': kw.get('state'),
        'size': kw.get('size'),
        'logical_volume': kw.get('logical_volume'),
        'replicated': kw.get('replicated'),
        'isystem_uuid': kw.get('isystem_uuid', None)
    }
    return controller_fs


def create_test_controller_fs(**kw):
    controller_fs = get_test_controller_fs(**kw)
    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del controller_fs['id']
    dbapi = db_api.get_instance()
    return dbapi.controller_fs_create(controller_fs)


# Create test user object
def get_test_user(**kw):
    user = {

@@ -725,11 +750,35 @@ def get_test_mon(**kw):
    return mon


def get_test_host_fs(**kw):
    host_fs = {
        'id': kw.get('id', 2),
        'uuid': kw.get('uuid'),
        'name': kw.get('name'),
        'size': kw.get('size', 2029),
        'logical_volume': kw.get('logical_volume', 'scratch-lv'),
        'forihostid': kw.get('forihostid', 1),
    }
    return host_fs


def create_test_host_fs(**kw):
    host_fs = get_test_host_fs(**kw)
    if 'uuid' not in kw:
        del host_fs['uuid']
    dbapi = db_api.get_instance()
    forihostid = host_fs['forihostid']
    return dbapi.host_fs_create(forihostid, host_fs)


def get_test_lvg(**kw):
    lvg = {
        'id': kw.get('id', 2),
        'uuid': kw.get('uuid'),
        'lvm_vg_name': kw.get('lvm_vg_name'),
        'lvm_vg_size': kw.get('lvm_vg_size', 202903650304),
        'lvm_vg_total_pe': kw.get('lvm_vg_total_pe', 6047),
        'lvm_vg_free_pe': kw.get('lvm_vg_free_pe', 1541),
        'forihostid': kw.get('forihostid', 2),
    }
    return lvg

@@ -754,6 +803,7 @@ def get_test_pv(**kw):
    pv = {
        'id': kw.get('id', 2),
        'uuid': kw.get('uuid'),
        'pv_state': kw.get('pv_state', 'unprovisioned'),
        'lvm_vg_name': kw.get('lvm_vg_name'),
        'disk_or_part_uuid': kw.get('disk_or_part_uuid', str(uuid.uuid4())),
        'disk_or_part_device_path': kw.get('disk_or_part_device_path',

@@ -1297,6 +1347,49 @@ def create_test_label(**kw):
    return dbapi.label_create(label['host_id'], label)


def get_test_service_parameter(**kw):
    service_parameter = {
        'section': kw.get('section'),
        'service': kw.get('service'),
        'name': kw.get('name'),
        'value': kw.get('value'),
        'resource': kw.get('resource'),
        'personality': kw.get('personality'),
    }
    return service_parameter


def create_test_service_parameter(**kw):
    """Create test service parameter in DB and return a service_parameter object.
    Function to be used to create test service parameter objects in the database.
    :param kw: kwargs with overriding values for service parameter's attributes.
    :returns: Test service parameter DB object.
    """
    service_parameter = get_test_service_parameter(**kw)
    dbapi = db_api.get_instance()
    return dbapi.service_parameter_create(service_parameter)


def create_test_oam(**kw):
    dbapi = db_api.get_instance()
    return dbapi.iextoam_get_one()


# Create test certificate object
def get_test_certificate(**kw):
    certificate = {
        'id': kw.get('id'),
        'uuid': kw.get('uuid'),
        'certtype': kw.get('certtype'),
        'signature': kw.get('signature')
    }
    return certificate


def create_test_certificate(**kw):
    certificate = get_test_certificate(**kw)
    # Let DB generate ID if it isn't specified explicitly
    if 'id' not in kw:
        del certificate['id']
    dbapi = db_api.get_instance()
    return dbapi.certificate_create(certificate)

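These factory helpers follow the repo's get_test_*/create_test_* convention: build a default attribute dict, let **kw override individual fields, then persist through the DB API. A hedged usage sketch (assumes a test case whose database fixture is already applied, as in the functional tests above):

from sysinv.tests.db import utils as dbutils

# Persist a host filesystem row with one overridden field; unspecified
# attributes fall back to the helper's defaults (e.g. 'scratch-lv').
host_fs = dbutils.create_test_host_fs(name='scratch', forihostid=1)

# Service parameters take their identifying triple plus a value.
param = dbutils.create_test_service_parameter(
    service='http', section='config', name='http_port', value='8080')
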
@@ -0,0 +1,28 @@
from sysinv.helm import nova
from sysinv.helm import helm
from sysinv.common import constants

from sysinv.tests.db import base as dbbase


class NovaGetOverrideTest(dbbase.ControllerHostTestCase):

    def setUp(self):
        super(NovaGetOverrideTest, self).setUp()
        self.operator = helm.HelmOperator(self.dbapi)
        self.nova = nova.NovaHelm(self.operator)
        self.worker = self._create_test_host(
            personality=constants.WORKER,
            administrative=constants.ADMIN_LOCKED)
        self.ifaces = self._create_test_host_platform_interface(self.worker)
        self.dbapi.address_create({
            'name': 'test',
            'family': self.oam_subnet.version,
            'prefix': self.oam_subnet.prefixlen,
            'address': str(self.oam_subnet[24]),
            'interface_id': self.ifaces[0].id,
            'enable_dad': self.oam_subnet.version == 6
        })

    def test_update_host_addresses(self):
        self.nova._update_host_addresses(self.worker, {}, {}, {})

@@ -33,7 +33,7 @@ class OidcClientTestCase(test_helm.StxPlatformAppMixin,
        parameters = {
            'config': {
                'issuer': 'https://%s:30556/dex' % oam_url,
                'redirect_uri': "http://%s:30555/callback" % oam_url,
                'redirect_uri': "https://%s:30555/callback" % oam_url,
            }
        }
        self.assertOverridesParameters(overrides, parameters)

@ -719,6 +719,36 @@ class InterfaceTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase):
            self.context, self.iface, network.id)
        self.assertEqual(method, 'static')

    def test_get_interface_address_method_for_platform_ipv4(self):
        self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
        self.iface['ipv4_mode'] = constants.IPV4_STATIC
        self.iface['networktype'] = constants.NETWORK_TYPE_NONE
        method = interface.get_interface_address_method(
            self.context, self.iface)
        self.assertEqual(method, 'static')

    def test_get_interface_address_method_for_platform_ipv6(self):
        self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
        self.iface['ipv6_mode'] = constants.IPV6_STATIC
        self.iface['networktype'] = constants.NETWORK_TYPE_NONE
        method = interface.get_interface_address_method(
            self.context, self.iface)
        self.assertEqual(method, 'static')

    def test_get_interface_address_method_for_platform_invalid(self):
        self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
        self.iface['ipv4_mode'] = constants.IPV4_STATIC
        self.iface['networktype'] = constants.NETWORK_TYPE_OAM
        self.iface['networks'] = self._get_network_ids_by_type(
            constants.NETWORK_TYPE_OAM)
        self.host['personality'] = constants.WORKER
        self._update_context()
        network = self.dbapi.network_get_by_type(
            constants.NETWORK_TYPE_OAM)
        method = interface.get_interface_address_method(
            self.context, self.iface, network.id)
        self.assertEqual(method, 'dhcp')

    def test_get_interface_traffic_classifier_for_mgmt(self):
        self.iface['ifclass'] = constants.INTERFACE_CLASS_PLATFORM
        self.iface['networktypelist'] = [constants.NETWORK_TYPE_MGMT]
@ -767,6 +797,16 @@ class InterfaceTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase):
        value = interface.get_sriov_interface_port(self.context, vf)
        self.assertEqual(value, port)

    def test_get_sriov_interface_port_invalid(self):
        port, iface = self._create_ethernet_test(
            'pthru',
            constants.INTERFACE_CLASS_PCI_PASSTHROUGH,
            constants.NETWORK_TYPE_PCI_PASSTHROUGH)
        self._update_context()
        self.assertRaises(AssertionError,
                          interface.get_sriov_interface_port,
                          self.context,
                          iface)

    def test_get_sriov_interface_vf_addrs(self):
        vf_addr1 = "0000:81:00.0"
        vf_addr2 = "0000:81:01.0"
@ -1073,10 +1113,13 @@ class InterfaceTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase):
    def _get_sriov_config(self, ifname='default',
                          vf_driver=constants.SRIOV_DRIVER_TYPE_VFIO,
-                         vf_addrs=None):
+                         vf_addrs=None, num_vfs=2,
+                         pf_addr=None):
        if vf_addrs is None:
-           vf_addrs = [""]
+           vf_addrs = []
        config = {'ifname': ifname,
+                 'pf_addr': pf_addr if pf_addr else self.port['pciaddr'],
+                 'num_vfs': num_vfs,
                  'vf_driver': vf_driver,
                  'vf_addrs': vf_addrs}
        return config
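For reference, the shape of the dict the updated helper now produces (a sketch only: the ifname and PCI address values are illustrative, assuming self.port['pciaddr'] is "0000:81:00.0"):

    # _get_sriov_config('sriov0', 'vfio-pci', ['0000:81:00.0'], num_vfs=2)
    {
        'ifname': 'sriov0',
        'pf_addr': '0000:81:00.0',  # falls back to self.port['pciaddr'] when pf_addr is None
        'num_vfs': 2,
        'vf_driver': 'vfio-pci',
        'vf_addrs': ['0000:81:00.0'],
    }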
@ -1362,13 +1405,16 @@ class InterfaceTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase):
        print(expected)
        self.assertEqual(expected, config)

-   def _create_sriov_vf_driver_config(self, iface_vf_driver, port_vf_driver, vf_addr_list):
+   def _create_sriov_vf_config(self, iface_vf_driver, port_vf_driver,
+                               vf_addr_list, num_vfs):
        self.iface['ifclass'] = constants.INTERFACE_CLASS_PCI_SRIOV
        self.iface['networktype'] = constants.NETWORK_TYPE_PCI_SRIOV
        self.iface['sriov_vf_driver'] = iface_vf_driver
+       self.iface['sriov_numvfs'] = num_vfs
        self.port['sriov_vf_driver'] = port_vf_driver
        self.port['sriov_vfs_pci_address'] = vf_addr_list
        self._update_context()

        config = interface.get_sriov_config(self.context, self.iface)
        return config
@ -1376,39 +1422,62 @@ class InterfaceTestCase(InterfaceTestCaseMixin, dbbase.BaseHostTestCase):
        vf_addr1 = "0000:81:00.0"
        vf_addr2 = "0000:81:01.0"
        vf_addr_list = "{},{}".format(vf_addr1, vf_addr2)
        num_vfs = 2

-       config = self._create_sriov_vf_driver_config(
-           constants.SRIOV_DRIVER_TYPE_NETDEVICE, 'i40evf', vf_addr_list)
+       config = self._create_sriov_vf_config(
+           constants.SRIOV_DRIVER_TYPE_NETDEVICE, 'i40evf', vf_addr_list,
+           num_vfs)
        expected = self._get_sriov_config(
            self.iface['ifname'], 'i40evf',
            [quoted_str(vf_addr1),
-            quoted_str(vf_addr2)])
+            quoted_str(vf_addr2)],
+           num_vfs)
        self.assertEqual(expected, config)

    def test_get_sriov_config_vfio(self):
        vf_addr1 = "0000:81:00.0"
        vf_addr2 = "0000:81:01.0"
        vf_addr_list = "{},{}".format(vf_addr1, vf_addr2)
        num_vfs = 4

-       config = self._create_sriov_vf_driver_config(
-           constants.SRIOV_DRIVER_TYPE_VFIO, 'i40evf', vf_addr_list)
+       config = self._create_sriov_vf_config(
+           constants.SRIOV_DRIVER_TYPE_VFIO, 'i40evf', vf_addr_list,
+           num_vfs)
        expected = self._get_sriov_config(
            self.iface['ifname'], 'vfio-pci',
            [quoted_str(vf_addr1),
-            quoted_str(vf_addr2)])
+            quoted_str(vf_addr2)],
+           num_vfs)
        self.assertEqual(expected, config)

    def test_get_sriov_config_default(self):
        vf_addr1 = "0000:81:00.0"
        vf_addr2 = "0000:81:01.0"
        vf_addr_list = "{},{}".format(vf_addr1, vf_addr2)
        num_vfs = 1

-       config = self._create_sriov_vf_driver_config(
-           None, 'i40evf', vf_addr_list)
+       config = self._create_sriov_vf_config(
+           None, 'i40evf', vf_addr_list, num_vfs)
        expected = self._get_sriov_config(
            self.iface['ifname'], None,
            [quoted_str(vf_addr1),
-            quoted_str(vf_addr2)])
+            quoted_str(vf_addr2)],
+           num_vfs)
        self.assertEqual(expected, config)

    def test_get_sriov_config_iftype_vf(self):
        port, iface = self._create_ethernet_test(
            'sriov1', constants.INTERFACE_CLASS_PCI_SRIOV,
            constants.NETWORK_TYPE_PCI_SRIOV, sriov_numvfs=2,
            sriov_vf_driver=None)
        vf = self._create_vf_test("vf1", 1, None, lower_iface=iface)
        self._update_context()

        config = interface.get_sriov_config(self.context, vf)
        expected = self._get_sriov_config(
            vf['ifname'], None,
            None,
            None, pf_addr=port['pciaddr'])
        self.assertEqual(expected, config)

    def test_is_a_mellanox_cx3_device_false(self):
@ -0,0 +1,51 @@
#!/bin/bash

# This script allows a developer to set up their DB for opportunistic tests.
# openstack_citest is used by oslo_db for opportunistic DB tests.
# This method is based on code in neutron/tools.

# Set env variable for MYSQL_PASSWORD
MYSQL_PASSWORD=${MYSQL_PASSWORD:-stackdb}

function _install_mysql {
    echo "Installing MySQL database"

    # Set up the 'openstack_citest' user and database in MySQL
    tmp_dir=$(mktemp -d)
    trap "rm -rf $tmp_dir" EXIT

    cat << EOF > $tmp_dir/mysql.sql
DROP DATABASE IF EXISTS openstack_citest;
CREATE DATABASE openstack_citest;
CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest';
CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost';
GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest';
FLUSH PRIVILEGES;
EOF
    /usr/bin/mysql -u root -p"$MYSQL_PASSWORD" < $tmp_dir/mysql.sql
}

function _install_postgres {
    echo "Installing Postgres database"

    # Set up the 'openstack_citest' user and database in Postgres
    tmp_dir=$(mktemp -d)
    trap "rm -rf $tmp_dir" EXIT

    cat << EOF > $tmp_dir/postgresql.sql
CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest';
CREATE DATABASE openstack_citest WITH OWNER openstack_citest;
EOF
    chmod 777 $tmp_dir/postgresql.sql
    sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql
}

echo "TODO: Add getopts support to select which DB you want to install"

echo "MYSQL"
_install_mysql

echo "POSTGRES"
_install_postgres
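Once the script has run, a quick way to confirm the credentials work is a trivial SQLAlchemy round trip (a sketch only: assumes SQLAlchemy plus the pymysql and psycopg2 drivers are installed, and is not part of this change):

    import sqlalchemy

    for dsn in (
        'mysql+pymysql://openstack_citest:openstack_citest@localhost/openstack_citest',
        'postgresql+psycopg2://openstack_citest:openstack_citest@localhost/openstack_citest',
    ):
        engine = sqlalchemy.create_engine(dsn)
        with engine.connect() as conn:
            # A successful SELECT proves the user, password and database exist.
            conn.execute(sqlalchemy.text('SELECT 1'))
        print('OK:', dsn)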