diff --git a/lower-constraints.txt b/lower-constraints.txt index 3c852d710..43351d6da 100644 --- a/lower-constraints.txt +++ b/lower-constraints.txt @@ -46,7 +46,7 @@ jsonschema==3.2.0 keystoneauth1==4.3.1 keystonemiddleware==4.17.0 kombu==4.3.0 -kubernetes==11.0.0 +kubernetes==18.20.0 linecache2==1.0.0 Mako==1.0.7 MarkupSafe==1.1 @@ -116,7 +116,7 @@ python-swiftclient==3.5.0 python-tackerclient==0.8.0 python-cinderclient==8.0.0 pytz==2018.3 -PyYAML==5.1 +PyYAML==5.4.1 repoze.lru==0.7 requests-oauthlib==0.8.0 requests==2.25.1 diff --git a/requirements.txt b/requirements.txt index 197e3c2d4..b092daa4a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -45,10 +45,10 @@ pyroute2>=0.4.21;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) python-mistralclient>=4.2.0 # Apache-2.0 python-barbicanclient>=4.5.2 # Apache-2.0 castellan>=0.16.0 # Apache-2.0 -kubernetes>=11.0.0 # Apache-2.0 +kubernetes>=18.20.0 # Apache-2.0 setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=21.0.0 # PSF/ZPL tooz>=1.58.0 # Apache-2.0 -PyYAML>=5.1 # MIT +PyYAML>=5.4.1 # MIT PyMySQL>=0.10.1 # MIT # Glance Store diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/api-service.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/api-service.yaml index c6d30f046..f24d5205b 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/api-service.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/api-service.yaml @@ -6,4 +6,6 @@ spec: group: currytest.k8s.io groupPriorityMinimum: 17000 version: v1beta1 - versionPriority: 5 \ No newline at end of file + versionPriority: 5 + service: + name: test \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/daemon-set.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/daemon-set.yaml index b73b3fa7e..9e6185b9d 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/daemon-set.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/daemon-set.yaml @@ -14,3 +14,11 @@ spec: containers: - image: nginx name: nginx +status: + currentNumberScheduled: 1 + desiredNumberScheduled: 1 + numberMisscheduled: 1 + numberReady: 1 + conditions: + - status: True + type: DaemonSet diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/deployment.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/deployment.yaml index e85bf7e4b..8d9c36dac 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/deployment.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/deployment.yaml @@ -7,6 +7,9 @@ spec: selector: matchLabels: selector: curry-probe-test001 + matchExpressions: + - key: test + operator: test template: metadata: labels: @@ -38,3 +41,7 @@ spec: path: / failureThreshold: 2 periodSeconds: 2 +status: + conditions: + - status: True + type: Deployment diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/horizontal-pod-autoscaler.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/horizontal-pod-autoscaler.yaml index 304d81c2e..d185fddd2 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/horizontal-pod-autoscaler.yaml +++ 
b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/horizontal-pod-autoscaler.yaml @@ -11,3 +11,6 @@ spec: kind: Deployment name: curry-svc-vdu001 targetCPUUtilizationPercentage: 40 +status: + currentReplicas: 1 + desiredReplicas: 1 diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/job.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/job.yaml index 9bebf7022..5a364a589 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/job.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/job.yaml @@ -24,4 +24,7 @@ spec: limits: {} requests: {} restartPolicy: OnFailure -status: {} +status: + conditions: + - status: True + type: Job diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/limit-range.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/limit-range.yaml index 48e30d0c3..f9ec8755d 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/limit-range.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/limit-range.yaml @@ -7,4 +7,5 @@ spec: limits: - default: cpu: 500m - memory: 512M \ No newline at end of file + memory: 512M + type: test diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/namespace.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/namespace.yaml index 0af7d1831..9ed33b405 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/namespace.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/namespace.yaml @@ -1,4 +1,8 @@ apiVersion: v1 kind: Namespace metadata: - name: curry-ns \ No newline at end of file + name: curry-ns +status: + conditions: + - status: True + type: Namespace \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/network-policy.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/network-policy.yaml index 708efc0be..a16e79195 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/network-policy.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/network-policy.yaml @@ -5,4 +5,11 @@ metadata: spec: podSelector: {} policyTypes: - - Egress \ No newline at end of file + - Egress + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 5978 \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/node.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/node.yaml index f730b3560..a1639013b 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/node.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/node.yaml @@ -4,3 +4,38 @@ metadata: name: curry-node-test labels: name: curry-node-test +spec: + configSource: + configMap: + name: CONFIG_MAP_NAME + namespace: kube-system + kubeletConfigKey: kubelet + taints: + - effect: 'test' + key: 'test' +status: + addresses: + - address: '1.1.1.1' + type: 'test' + conditions: + - status: True + type: Node + daemonEndpoints: + kubeletEndpoint: + port: 8080 + images: + - names: 'test' + nodeInfo: + architecture: 'test' + bootId: 'test' + containerRuntimeVersion: 'test' + kernelVersion: 'test' + kubeProxyVersion: 'test' + kubeletVersion: 
'test' + machineId: 'test' + operatingSystem: 'test' + osImage: 'test' + systemUuid: 'test' + volumesAttached: + - device_path: 'test' + name: 'test' diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume-claim.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume-claim.yaml index f36029d5e..ecba4ac7b 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume-claim.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume-claim.yaml @@ -9,4 +9,11 @@ spec: resources: requests: storage: 2Gi - storageClassName: curry-sc-local \ No newline at end of file + storageClassName: curry-sc-local + dataSource: + name: existing-src-pvc-name + kind: PersistentVolumeClaim +status: + conditions: + - status: True + type: PersistentVolumeClaim \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume.yaml index bf075185b..6bcb9c85d 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume.yaml @@ -11,4 +11,44 @@ spec: path: /data/curry-sc-test type: DirectoryOrCreate persistentVolumeReclaimPolicy: Delete - storageClassName: curry-sc-local \ No newline at end of file + storageClassName: curry-sc-local + azureFile: + secretName: azure-secret + shareName: aksshare + readOnly: false + cephfs: + monitors: + - 10.16.154.78:6789 + cinder: + volumeID: "90d6900d-808f-4ddb-a30e-5ef821f58b4e" + fsType: ext4 + csi: + driver: csi-nfsplugin + volume_handle: data-id + flexVolume: + driver: "kubernetes.io/lvm" + fsType: "ext4" + glusterfs: + endpoints: glusterfs-cluster + path: kube_vol + readOnly: true + iscsi: + targetPortal: 10.0.2.15:3260 + iqn: iqn.2001-04.com.example:storage.kube.sys1.xyz + lun: 0 + local: + path: /mnt/disks/ssd1 + rbd: + monitors: + - '10.16.154.78:6789' + image: foo + scaleIO: + gateway: https://localhost:443/api + system: scaleio + secretRef: + name: sio-secret + awsElasticBlockStore: + volumeID: "123" + azureDisk: + diskName: test.vhd + diskURI: https://someaccount.blob.microsoft.net/vhds/test.vhd diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod-template.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod-template.yaml index be7a29666..bdb785fa6 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod-template.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod-template.yaml @@ -40,4 +40,80 @@ template: - name: curry-claim-volume persistentVolumeClaim: claimName: curry-pv-claim + azureFile: + secretName: azure-secret + shareName: aksshare + readOnly: false + cephfs: + monitors: + - 10.16.154.78:6789 + cinder: + volumeID: "90d6900d-808f-4ddb-a30e-5ef821f58b4e" + fsType: ext4 + configMap: + name: log-config + items: + - key: log_level + path: log_level + csi: + driver: csi-nfsplugin + volume_handle: data-id + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + resourceFieldRef: + resource: limits.cpu + flexVolume: + driver: "kubernetes.io/lvm" + fsType: "ext4" + glusterfs: + endpoints: glusterfs-cluster + path: kube_vol + readOnly: true + 
gcePersistentDisk: + pdName: my-data-disk + fsType: ext4 + gitRepo: + repository: "git@somewhere:me/my-git-repository.git" + revision: "22f1d8406d464b0c0874075539c1f2e96c253775" + hostPath: + path: /var/local/aaa + type: DirectoryOrCreate + iscsi: + targetPortal: 10.0.2.15:3260 + iqn: iqn.2001-04.com.example:storage.kube.sys1.xyz + lun: 0 + nfs: + server: nfs-server.default.svc.cluster.local + path: "/" + photonPersistentDisk: + pdId: 'test' + portworxVolume: + volumeID: "pxvol" + projected: + sources: + - secret: + name: mysecret + items: + - key: username + path: my-group/my-username + serviceAccountToken: + path: 'test' + quobyte: + registry: 'test' + volume: 'test' + rbd: + monitors: + - '10.16.154.78:6789' + image: foo + scaleIO: + gateway: https://localhost:443/api + system: scaleio + secretRef: + name: sio-secret + vsphereVolume: + volumePath: "[DatastoreName] volumes/myDisk" + fsType: ext4 terminationGracePeriodSeconds: 0 \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod.yaml index 3a137c442..ccd51bd93 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod.yaml @@ -2,6 +2,13 @@ apiVersion: v1 kind: Pod metadata: name: curry-endpoint-test001 + ownerReferences: + - apiVersion: apps/v1 + controller: true + blockOwnerDeletion: true + kind: ReplicaSet + name: my-repset + uid: d9607e19-f88f-11e6-a518-42010a800195 spec: containers: - image: celebdor/kuryr-demo @@ -9,3 +16,84 @@ spec: name: web-server ports: - containerPort: 8080 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + httpHeaders: + - name: Custom-Header + value: Awesome + tcpSocket: + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 3 + volumeDevices: + - name: data + devicePath: /dev/xvda + volumeMounts: + - name: redis-storage + mountPath: /data/redis + env: + - value_from: + config_map_key_ref: + key: test + secret_key_ref: + key: test + name: test + readinessGates: + - conditionType: "www.example.com/feature-1" + securityContext: + sysctls: + - name: kernel.shm_rmid_forced + value: "0" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + foo: bar + ephemeralContainers: + - name: debugger + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/e2e-az-name + operator: In + values: + - e2e-az1 + - e2e-az2 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: security + operator: In + values: + - S2 + topologyKey: topology.kubernetes.io/zone +status: + containerStatuses: + - lastState: + terminated: + exitCode: 1 + image: test + image_id: 123 + name: test + ready: True + restart_count: 1 + conditions: + - status: True + type: Pod diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/replica-set.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/replica-set.yaml index 90b364bc5..592c220f6 100644 --- 
a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/replica-set.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/replica-set.yaml @@ -14,4 +14,9 @@ spec: spec: containers: - image: nginx - name: nginx \ No newline at end of file + name: nginx +status: + replicas: 1 + conditions: + - status: True + type: ReplicaSet \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/resource-quota.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/resource-quota.yaml index 4a886a9e5..c066a1305 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/resource-quota.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/resource-quota.yaml @@ -9,3 +9,9 @@ spec: memory: 2Gi scopes: - NotBestEffort + scopeSelector: + matchExpressions: + - scopeName: PriorityClass + operator: In + values: + - middle diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-rule-review.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-rule-review.yaml index d5e943fdb..a45a85881 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-rule-review.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-rule-review.yaml @@ -1,4 +1,12 @@ apiVersion: authorization.k8s.io/v1 kind: SelfSubjectRulesReview spec: - namespace: curry-ns \ No newline at end of file + namespace: curry-ns +status: + resourceRules: + - verbs: + - 'test' + incomplete: True + nonResourceRules: + - verbs: + - 'test' diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/stateful-set.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/stateful-set.yaml index e704cb18f..d4003dc85 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/stateful-set.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/stateful-set.yaml @@ -31,3 +31,8 @@ spec: resources: requests: storage: 1Gi +status: + conditions: + - status: True + type: StatefulSet + replicas: 1 diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/storage-class.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/storage-class.yaml index ac72f61b3..6ee3e1505 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/storage-class.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/storage-class.yaml @@ -4,5 +4,10 @@ metadata: name: curry-sc-local provisioner: kubernetes.io/no-provisioner volumeBindingMode: WaitForFirstConsumer +allowedTopologies: +- matchLabelExpressions: + - key: failure-domain.beta.kubernetes.io/zone + values: + - us-central1-a #volumeBindingMode: Immediate #reclaimPolicy: Retain \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/subject-access-review.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/subject-access-review.yaml index 6022be645..c00f44e26 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/subject-access-review.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/subject-access-review.yaml @@ -6,4 +6,6 @@ spec: group: apps resource: deployments 
verb: create - namespace: curry-ns \ No newline at end of file + namespace: curry-ns +status: + allowed: True \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/volume-attachment.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/volume-attachment.yaml index 0a60063bd..aeba7ea47 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/volume-attachment.yaml +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/volume-attachment.yaml @@ -7,4 +7,6 @@ spec: attacher: nginx node_name: nginx source: - persistent_volume_name: curry-sc-pvc \ No newline at end of file + persistent_volume_name: curry-sc-pvc +status: + attached: True \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py index 7691b65d1..132b10d81 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py @@ -63,369 +63,794 @@ class TestTransformer(base.TestCase): def test_deployment(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['deployment.yaml'], self.yaml_path) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'Deployment') - self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') + self.assertEqual(k8s_obj.kind, 'Deployment') + self.assertEqual(k8s_obj.api_version, 'apps/v1') + + # V1DeploymentCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'Deployment') + # V1DeploymentSpec + self.assertIsNotNone(k8s_obj.spec.selector) + self.assertIsNotNone(k8s_obj.spec.template) + # V1LabelSelectorRequirement + self.assertEqual(k8s_obj.spec.selector. + match_expressions[0].key, 'test') + self.assertEqual(k8s_obj.spec.selector. 
+ match_expressions[0].operator, 'test') def test_api_service(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['api-service.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'APIService') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'apiregistration.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'APIService') + self.assertEqual(k8s_obj.api_version, 'apiregistration.k8s.io/v1') + # V1APIServiceSpec + self.assertEqual(k8s_obj.spec.group_priority_minimum, 17000) + self.assertIsNotNone(k8s_obj.spec.service) + self.assertEqual(k8s_obj.spec.version_priority, 5) def test_cluster_role(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['cluster-role.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'ClusterRole') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'rbac.authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'ClusterRole') + self.assertEqual(k8s_obj.api_version, 'rbac.authorization.k8s.io/v1') + # V1PolicyRule + self.assertIsNotNone(k8s_obj.rules[0].verbs) def test_cluster_role_binding(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['cluster-role-binding.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'ClusterRoleBinding') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'rbac.authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'ClusterRoleBinding') + self.assertEqual(k8s_obj.api_version, 'rbac.authorization.k8s.io/v1') + # V1ClusterRoleBinding + self.assertIsNotNone(k8s_obj.role_ref) + # V1RoleRef + self.assertEqual(k8s_obj.role_ref.api_group, + 'rbac.authorization.k8s.io') + self.assertEqual(k8s_obj.role_ref.kind, 'ClusterRole') + self.assertEqual(k8s_obj.role_ref.name, 'curry-cluster-role') + # V1Subject + self.assertEqual(k8s_obj.subjects[0].kind, 'ServiceAccount') + self.assertEqual(k8s_obj.subjects[0].name, 'curry-cluster-sa') def test_config_map(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['config-map.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'ConfigMap') - self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + self.assertEqual(k8s_obj.kind, 'ConfigMap') + self.assertEqual(k8s_obj.api_version, 'v1') def test_daemon_set(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['daemon-set.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'DaemonSet') - self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') + self.assertEqual(k8s_obj.kind, 'DaemonSet') + self.assertEqual(k8s_obj.api_version, 'apps/v1') + # V1DaemonSetStatus + 
self.assertEqual(k8s_obj.status.current_number_scheduled, 1) + self.assertEqual(k8s_obj.status.desired_number_scheduled, 1) + self.assertEqual(k8s_obj.status.number_misscheduled, 1) + self.assertEqual(k8s_obj.status.number_ready, 1) + # V1DaemonSetCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'DaemonSet') + # V1DaemonSetSpec + self.assertIsNotNone(k8s_obj.spec.selector) + self.assertIsNotNone(k8s_obj.spec.template) def test_horizontal_pod_autoscaler(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['horizontal-pod-autoscaler.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'default') - self.assertEqual(k8s_objs[0].get('object').kind, - 'HorizontalPodAutoscaler') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'autoscaling/v1') + self.assertEqual(k8s_obj.kind, 'HorizontalPodAutoscaler') + self.assertEqual(k8s_obj.api_version, 'autoscaling/v1') + # V1HorizontalPodAutoscalerSpec + self.assertEqual(k8s_obj.spec.max_replicas, 3) + self.assertIsNotNone(k8s_obj.spec.scale_target_ref) + # V1CrossVersionObjectReference + self.assertEqual(k8s_obj.spec.scale_target_ref.kind, 'Deployment') + self.assertEqual(k8s_obj.spec.scale_target_ref.name, + 'curry-svc-vdu001') + # V1HorizontalPodAutoscalerStatus + self.assertEqual(k8s_obj.status.current_replicas, 1) + self.assertEqual(k8s_obj.status.desired_replicas, 1) def test_job(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['job.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'Job') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'batch/v1') + self.assertEqual(k8s_obj.kind, 'Job') + self.assertEqual(k8s_obj.api_version, 'batch/v1') + # V1JobCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'Job') + # V1JobSpec + self.assertIsNotNone(k8s_obj.spec.template) def test_lease(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['lease.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'default') - self.assertEqual(k8s_objs[0].get('object').kind, 'Lease') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'coordination.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'Lease') + self.assertEqual(k8s_obj.api_version, 'coordination.k8s.io/v1') def test_local_subject_access_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['local-subject-access-review.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'LocalSubjectAccessReview') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'LocalSubjectAccessReview') + self.assertEqual(k8s_obj.api_version, 'authorization.k8s.io/v1') + self.assertIsNotNone(k8s_obj.spec) def test_namespace(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( 
['namespace.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'Namespace') - self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + self.assertEqual(k8s_obj.kind, 'Namespace') + self.assertEqual(k8s_obj.api_version, 'v1') + + # V1NamespaceCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'Namespace') def test_network_policy(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['network-policy.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'NetworkPolicy') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'networking.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'NetworkPolicy') + self.assertEqual(k8s_obj.api_version, 'networking.k8s.io/v1') + + # V1IPBlock + self.assertEqual(k8s_obj.spec.egress[0].to[0].ip_block.cidr, + '10.0.0.0/24') + # V1NetworkPolicySpec + self.assertIsNotNone(k8s_obj.spec.pod_selector) def test_node(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['node.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'Node') - self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + self.assertEqual(k8s_obj.kind, 'Node') + self.assertEqual(k8s_obj.api_version, 'v1') + + # V1ConfigMapNodeConfigSource + self.assertEqual(k8s_obj.spec.config_source. + config_map.kubelet_config_key, 'kubelet') + self.assertEqual(k8s_obj.spec.config_source. + config_map.namespace, 'kube-system') + self.assertEqual(k8s_obj.spec.config_source. + config_map.name, 'CONFIG_MAP_NAME') + # V1Taint + self.assertEqual(k8s_obj.spec.taints[0].key, 'test') + self.assertEqual(k8s_obj.spec.taints[0].effect, 'test') + # V1NodeAddress + self.assertEqual(k8s_obj.status.addresses[0].address, '1.1.1.1') + self.assertEqual(k8s_obj.status.addresses[0].type, 'test') + # V1NodeCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'Node') + # V1DaemonEndpoint + self.assertEqual(k8s_obj.status.daemon_endpoints. + kubelet_endpoint.port, 8080) + # V1ContainerImage + self.assertEqual(k8s_obj.status.images[0].names, 'test') + # V1NodeSystemInfo + self.assertEqual(k8s_obj.status.node_info.architecture, 'test') + self.assertEqual(k8s_obj.status.node_info.boot_id, 'test') + self.assertEqual(k8s_obj.status.node_info. + container_runtime_version, 'test') + self.assertEqual(k8s_obj.status.node_info.kube_proxy_version, 'test') + self.assertEqual(k8s_obj.status.node_info.kubelet_version, 'test') + self.assertEqual(k8s_obj.status.node_info.machine_id, 'test') + self.assertEqual(k8s_obj.status.node_info.operating_system, 'test') + self.assertEqual(k8s_obj.status.node_info.os_image, 'test') + self.assertEqual(k8s_obj.status.node_info.system_uuid, 'test') + # V1AttachedVolume + self.assertEqual(k8s_obj.status.volumes_attached[0]. 
+ device_path, 'test') + self.assertEqual(k8s_obj.status.volumes_attached[0].name, 'test') def test_persistent_volume(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['persistent-volume.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, 'PersistentVolume') - self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + self.assertEqual(k8s_obj.kind, 'PersistentVolume') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1AzureFilePersistentVolumeSource + self.assertEqual(k8s_obj.spec.azure_file.secret_name, 'azure-secret') + self.assertEqual(k8s_obj.spec.azure_file.share_name, 'aksshare') + # V1CephFSPersistentVolumeSource + self.assertEqual(k8s_obj.spec.cephfs.monitors[0], '10.16.154.78:6789') + # V1CinderPersistentVolumeSource + self.assertEqual(k8s_obj.spec.cinder.volume_id, + '90d6900d-808f-4ddb-a30e-5ef821f58b4e') + # V1CSIPersistentVolumeSource + self.assertEqual(k8s_obj.spec.csi.driver, 'csi-nfsplugin') + self.assertEqual(k8s_objs[0].get('object').spec.csi.volume_handle, + 'data-id') + # V1FlexPersistentVolumeSource + self.assertEqual(k8s_objs[0].get('object').spec.flex_volume.driver, + 'kubernetes.io/lvm') + # V1GlusterfsPersistentVolumeSource + self.assertEqual(k8s_objs[0].get('object').spec.glusterfs.endpoints, + 'glusterfs-cluster') + self.assertEqual(k8s_obj.spec.glusterfs.path, 'kube_vol') + # V1ISCSIPersistentVolumeSource + self.assertEqual(k8s_obj.spec.iscsi.target_portal, '10.0.2.15:3260') + self.assertEqual(k8s_obj.spec.iscsi.iqn, + 'iqn.2001-04.com.example:storage.kube.sys1.xyz') + self.assertEqual(k8s_obj.spec.iscsi.lun, 0) + # V1LocalVolumeSource + self.assertEqual(k8s_obj.spec.local.path, '/mnt/disks/ssd1') + # V1RBDPersistentVolumeSource + self.assertEqual(k8s_obj.spec.rbd.monitors[0], '10.16.154.78:6789') + self.assertEqual(k8s_obj.spec.rbd.image, 'foo') + # V1ScaleIOPersistentVolumeSource + self.assertEqual(k8s_obj.spec.scale_io.gateway, + 'https://localhost:443/api') + self.assertIsNotNone(k8s_obj.spec.scale_io.secret_ref) + self.assertEqual(k8s_obj.spec.scale_io.system, 'scaleio') + # V1AWSElasticBlockStoreVolumeSource + self.assertEqual(k8s_obj.spec.aws_elastic_block_store.volume_id, + '123') + # V1AzureDiskVolumeSource + self.assertEqual(k8s_obj.spec.azure_disk.disk_name, 'test.vhd') + self.assertEqual( + k8s_obj.spec.azure_disk.disk_uri, + 'https://someaccount.blob.microsoft.net/vhds/test.vhd') def test_persistent_volume_claim(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['persistent-volume-claim.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, + self.assertEqual(k8s_obj.kind, 'PersistentVolumeClaim') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1PersistentVolumeClaimCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, + 'PersistentVolumeClaim') + # V1TypedLocalObjectReference + self.assertEqual(k8s_obj.spec.data_source.name, + 'existing-src-pvc-name') + self.assertEqual(k8s_obj.spec.data_source.kind, 'PersistentVolumeClaim') - self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') def test_pod(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( 
['pod.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'Pod') - self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + self.assertEqual(k8s_obj.kind, 'Pod') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1NodeSelector + self.assertIsNotNone( + k8s_obj.spec.affinity.node_affinity. + required_during_scheduling_ignored_during_execution. + node_selector_terms) + # V1NodeSelectorRequirement + self.assertEqual( + k8s_obj.spec.affinity.node_affinity. + required_during_scheduling_ignored_during_execution. + node_selector_terms[0].match_expressions[0].key, + 'kubernetes.io/e2e-az-name') + self.assertEqual( + k8s_obj.spec.affinity.node_affinity. + required_during_scheduling_ignored_during_execution. + node_selector_terms[0].match_expressions[0].operator, + 'In') + # V1PreferredSchedulingTerm + self.assertEqual( + k8s_obj.spec.affinity.node_affinity. + preferred_during_scheduling_ignored_during_execution[0]. + weight, 1) + self.assertIsNotNone( + k8s_obj.spec.affinity.node_affinity. + preferred_during_scheduling_ignored_during_execution[0]. + preference) + # V1PodAffinityTerm + self.assertEqual( + k8s_obj.spec.affinity.pod_anti_affinity. + preferred_during_scheduling_ignored_during_execution[0]. + pod_affinity_term.topology_key, 'topology.kubernetes.io/zone') + # V1WeightedPodAffinityTerm + self.assertEqual( + k8s_obj.spec.affinity.pod_anti_affinity. + preferred_during_scheduling_ignored_during_execution[0]. + weight, 100) + self.assertIsNotNone( + k8s_obj.spec.affinity.pod_anti_affinity. + preferred_during_scheduling_ignored_during_execution[0]. + pod_affinity_term) + # V1OwnerReference + self.assertEqual(k8s_obj.metadata.owner_references[0].api_version, + 'apps/v1') + self.assertEqual(k8s_obj.metadata.owner_references[0].kind, + 'ReplicaSet') + self.assertEqual(k8s_obj.metadata.owner_references[0].name, + 'my-repset') + self.assertEqual(k8s_obj.metadata.owner_references[0].uid, + 'd9607e19-f88f-11e6-a518-42010a800195') + # V1HTTPHeader + self.assertEqual(k8s_obj.spec.containers[0].liveness_probe.http_get. + http_headers[0].name, 'Custom-Header') + self.assertEqual(k8s_obj.spec.containers[0].liveness_probe.http_get. + http_headers[0].value, 'Awesome') + # V1TCPSocketAction + self.assertEqual(k8s_obj.spec.containers[0].liveness_probe. + tcp_socket.port, 8080) + # V1VolumeDevice + self.assertEqual(k8s_obj.spec.containers[0].volume_devices[0]. + device_path, '/dev/xvda') + self.assertEqual(k8s_obj.spec.containers[0].volume_devices[0].name, + 'data') + # V1PodReadinessGate + self.assertEqual(k8s_obj.spec.readiness_gates[0].condition_type, + 'www.example.com/feature-1') + # V1Sysctl + self.assertEqual(k8s_obj.spec.security_context.sysctls[0].name, + 'kernel.shm_rmid_forced') + self.assertEqual(k8s_obj.spec.security_context.sysctls[0].value, '0') + # V1ContainerStateTerminated + self.assertEqual(k8s_obj.status.container_statuses[0].last_state. + terminated.exit_code, 1) + # V1EphemeralContainer + self.assertEqual(k8s_obj.spec.topology_spread_constraints[0]. + topology_key, 'zone') + # V1TopologySpreadConstraint + self.assertEqual(k8s_obj.spec.ephemeral_containers[0].name, + 'debugger') + # V1HTTPGetAction + self.assertEqual(k8s_obj.spec.containers[0].liveness_probe. + http_get.port, 8080) + # V1ConfigMapKeySelector + self.assertEqual(k8s_obj.spec.containers[0].env[0].value_from. 
+ config_map_key_ref.key, 'test') + # V1EnvVar + self.assertEqual(k8s_obj.spec.containers[0].env[0].name, 'test') + # V1SecretKeySelector + self.assertEqual(k8s_obj.spec.containers[0].env[0].value_from. + secret_key_ref.key, 'test') + # V1ContainerPort + self.assertEqual(k8s_obj.spec.containers[0].ports[0]. + container_port, 8080) + # V1VolumeMount + self.assertEqual(k8s_obj.spec.containers[0].volume_mounts[0]. + mount_path, '/data/redis') + self.assertEqual(k8s_obj.spec.containers[0].volume_mounts[0]. + name, 'redis-storage') + # V1PodCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'Pod') + # V1ContainerStatus + self.assertEqual(k8s_obj.status.container_statuses[0].image, 'test') + self.assertEqual(k8s_obj.status.container_statuses[0].image_id, 123) + self.assertEqual(k8s_obj.status.container_statuses[0].name, 'test') + self.assertEqual(k8s_obj.status.container_statuses[0].ready, True) + self.assertEqual(k8s_obj.status.container_statuses[0].restart_count, 1) def test_priority_class(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['priority-class.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'PriorityClass') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'scheduling.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'PriorityClass') + self.assertEqual(k8s_obj.api_version, 'scheduling.k8s.io/v1') + # V1PriorityClass + self.assertEqual(k8s_obj.value, 1000000) def test_replica_set(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['replica-set.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'ReplicaSet') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'apps/v1') + self.assertEqual(k8s_obj.kind, 'ReplicaSet') + self.assertEqual(k8s_obj.api_version, 'apps/v1') + + # V1ReplicaSetStatus + self.assertEqual(k8s_obj.status.replicas, 1) + # V1ReplicaSetCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'ReplicaSet') + # V1ReplicaSetSpec + self.assertIsNotNone(k8s_obj.spec.selector) + self.assertIsNotNone(k8s_obj.spec.template) def test_resource_quota(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['resource-quota.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'ResourceQuota') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'v1') + self.assertEqual(k8s_obj.kind, 'ResourceQuota') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1ScopedResourceSelectorRequirement + self.assertEqual(k8s_obj.spec.scope_selector. + match_expressions[0].operator, 'In') + self.assertEqual(k8s_obj.spec.scope_selector. 
+ match_expressions[0].scope_name, 'PriorityClass') def test_role(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['role.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'Role') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'rbac.authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'Role') + self.assertEqual(k8s_obj.api_version, 'rbac.authorization.k8s.io/v1') def test_role_binding(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['role-bindings.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'RoleBinding') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'rbac.authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'RoleBinding') + self.assertEqual(k8s_obj.api_version, 'rbac.authorization.k8s.io/v1') + # V1RoleBinding + self.assertIsNotNone(k8s_obj.role_ref) def test_secret(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['secret.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'default') - self.assertEqual(k8s_objs[0].get('object').kind, - 'Secret') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'v1') + self.assertEqual(k8s_obj.kind, 'Secret') + self.assertEqual(k8s_obj.api_version, 'v1') def test_self_subject_access_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['self-subject-access-review.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'SelfSubjectAccessReview') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'SelfSubjectAccessReview') + self.assertEqual(k8s_obj.api_version, 'authorization.k8s.io/v1') + # V1SelfSubjectAccessReview + self.assertIsNotNone(k8s_obj.spec) def test_self_subject_rules_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['self-subject-rule-review.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'SelfSubjectRulesReview') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'SelfSubjectRulesReview') + self.assertEqual(k8s_obj.api_version, 'authorization.k8s.io/v1') + # V1ResourceRule + self.assertEqual(k8s_obj.status.resource_rules[0].verbs[0], 'test') + # V1SelfSubjectRulesReview + self.assertIsNotNone(k8s_obj.spec) + # V1SubjectRulesReviewStatus + self.assertIsNotNone(k8s_obj.status.resource_rules) + self.assertIsNotNone(k8s_obj.status.non_resource_rules) + self.assertEqual(k8s_obj.status.incomplete, True) + # V1NonResourceRule + self.assertEqual(k8s_obj.status.non_resource_rules[0].verbs[0], 'test') def test_service(self): k8s_objs = 
self.transfromer.get_k8s_objs_from_yaml( ['service.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'default') - self.assertEqual(k8s_objs[0].get('object').kind, - 'Service') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'v1') + self.assertEqual(k8s_obj.kind, 'Service') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1ServicePort + self.assertEqual(k8s_obj.spec.ports[0].port, 80) def test_service_account(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['service-account.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'default') - self.assertEqual(k8s_objs[0].get('object').kind, - 'ServiceAccount') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'v1') + self.assertEqual(k8s_obj.kind, 'ServiceAccount') + self.assertEqual(k8s_obj.api_version, 'v1') def test_stateful_set(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['stateful-set.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'StatefulSet') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'apps/v1') + self.assertEqual(k8s_obj.kind, 'StatefulSet') + self.assertEqual(k8s_obj.api_version, 'apps/v1') + # V1StatefulSetSpec + self.assertIsNotNone(k8s_obj.spec.selector) + self.assertIsNotNone(k8s_obj.spec.template) + self.assertEqual(k8s_obj.spec.service_name, 'nginx') + # V1StatefulSetCondition + self.assertEqual(k8s_obj.status.conditions[0].status, True) + self.assertEqual(k8s_obj.status.conditions[0].type, 'StatefulSet') + # V1StatefulSetStatus + self.assertEqual(k8s_obj.status.replicas, 1) def test_storage_class(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['storage-class.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'StorageClass') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'storage.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'StorageClass') + self.assertEqual(k8s_obj.api_version, 'storage.k8s.io/v1') + # V1StorageClass + self.assertEqual(k8s_obj.provisioner, 'kubernetes.io/no-provisioner') + # V1TopologySelectorLabelRequirement + self.assertEqual(k8s_obj.allowed_topologies[0]. + match_label_expressions[0].key, + 'failure-domain.beta.kubernetes.io/zone') + self.assertEqual(k8s_obj.allowed_topologies[0]. 
+ match_label_expressions[0].values[0], + 'us-central1-a') def test_subject_access_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['subject-access-review.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'SubjectAccessReview') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'authorization.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'SubjectAccessReview') + self.assertEqual(k8s_obj.api_version, 'authorization.k8s.io/v1') + # V1SubjectAccessReviewStatus + self.assertEqual(k8s_obj.status.allowed, True) + # V1SubjectAccessReview + self.assertIsNotNone(k8s_obj.spec) def test_token_review(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['token-review.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), '') - self.assertEqual(k8s_objs[0].get('object').kind, - 'TokenReview') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'authentication.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'TokenReview') + self.assertEqual(k8s_obj.api_version, 'authentication.k8s.io/v1') + # V1TokenReview + self.assertIsNotNone(k8s_obj.spec) def test_limit_range(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['limit-range.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'LimitRange') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'v1') + self.assertEqual(k8s_obj.kind, 'LimitRange') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1LimitRangeSpec + self.assertIsNotNone(k8s_obj.spec.limits) + self.assertIsNotNone(k8s_obj.spec.limits[0].type) def test_pod_template(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['pod-template.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'PodTemplate') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'v1') + self.assertEqual(k8s_obj.kind, 'PodTemplate') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1AzureFileVolumeSource + self.assertEqual(k8s_obj.template.spec.volumes[0]. + azure_file.secret_name, 'azure-secret') + # V1CephFSVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].cephfs.monitors[0], + '10.16.154.78:6789') + # V1CinderVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].cinder.volume_id, + '90d6900d-808f-4ddb-a30e-5ef821f58b4e') + # V1KeyToPath + self.assertEqual( + k8s_obj.template.spec.volumes[0].config_map.items[0].key, + 'log_level') + self.assertEqual( + k8s_obj.template.spec.volumes[0].config_map.items[0].path, + 'log_level') + # V1CSIVolumeSource + self.assertEqual(k8s_obj.template.spec.volumes[0].csi.driver, + 'csi-nfsplugin') + # V1DownwardAPIVolumeFile + self.assertEqual(k8s_obj.template.spec.volumes[0]. + downward_api.items[0].path, 'labels') + # V1ObjectFieldSelector + self.assertEqual( + k8s_obj.template.spec.volumes[0].downward_api.items[0]. 
+ field_ref.field_path, 'metadata.labels') + # V1ResourceFieldSelector + self.assertEqual( + k8s_obj.template.spec.volumes[0].downward_api.items[0]. + resource_field_ref.resource, 'limits.cpu') + # V1FlexVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].flex_volume.driver, + 'kubernetes.io/lvm') + # V1GCEPersistentDiskVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0]. + gce_persistent_disk.pd_name, 'my-data-disk') + # V1GitRepoVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0]. + git_repo.repository, 'git@somewhere:me/my-git-repository.git') + # V1GlusterfsVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].glusterfs.endpoints, + 'glusterfs-cluster') + self.assertEqual( + k8s_obj.template.spec.volumes[0].glusterfs.path, + 'kube_vol') + # V1HostPathVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].host_path.path, + '/var/local/aaa') + # V1ISCSIVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].iscsi.target_portal, + '10.0.2.15:3260') + self.assertEqual( + k8s_obj.template.spec.volumes[0].iscsi.iqn, + 'iqn.2001-04.com.example:storage.kube.sys1.xyz') + self.assertEqual(k8s_obj.template.spec.volumes[0].iscsi.lun, 0) + # V1Volume + self.assertEqual(k8s_obj.template.spec.volumes[0].name, + 'curry-claim-volume') + # V1NFSVolumeSource + self.assertEqual(k8s_obj.template.spec.volumes[0].nfs.path, '/') + self.assertEqual( + k8s_obj.template.spec.volumes[0].nfs.server, + 'nfs-server.default.svc.cluster.local') + # V1PersistentVolumeClaimVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0]. + persistent_volume_claim.claim_name, 'curry-pv-claim') + # V1PhotonPersistentDiskVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0]. + photon_persistent_disk.pd_id, 'test') + # V1PortworxVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0]. + portworx_volume.volume_id, 'pxvol') + # V1ProjectedVolumeSource + self.assertIsNotNone(k8s_obj.template.spec.volumes[0]. + projected.sources) + # V1QuobyteVolumeSource + self.assertIsNotNone( + k8s_obj.template.spec.volumes[0]. + quobyte.registry, 'test') + self.assertIsNotNone( + k8s_obj.template.spec.volumes[0]. + quobyte.volume, 'test') + # V1RBDVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].rbd.monitors[0], + '10.16.154.78:6789') + self.assertEqual(k8s_obj.template.spec.volumes[0].rbd.image, 'foo') + # V1ScaleIOVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].scale_io.gateway, + 'https://localhost:443/api') + self.assertIsNotNone( + k8s_obj.template.spec.volumes[0].scale_io.secret_ref) + self.assertEqual( + k8s_obj.template.spec.volumes[0].scale_io.system, 'scaleio') + # V1VsphereVirtualDiskVolumeSource + self.assertEqual( + k8s_obj.template.spec.volumes[0].vsphere_volume. + volume_path, '[DatastoreName] volumes/myDisk') + # V1ServiceAccountTokenProjection + self.assertEqual( + k8s_obj.template.spec.volumes[0]. 
+ projected.sources[0].service_account_token.path, 'test') def test_volume_attachment(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['volume-attachment.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'VolumeAttachment') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'storage.k8s.io/v1') + self.assertEqual(k8s_obj.kind, 'VolumeAttachment') + self.assertEqual(k8s_obj.api_version, 'storage.k8s.io/v1') + # V1VolumeAttachment + self.assertIsNotNone(k8s_obj.spec) + # V1VolumeAttachmentSpec + self.assertEqual(k8s_obj.spec.attacher, 'nginx') + self.assertEqual(k8s_obj.spec.node_name, 'nginx') + self.assertIsNotNone(k8s_obj.spec.source) + # V1VolumeAttachmentStatus + self.assertEqual(k8s_obj.status.attached, True) def test_bindings(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['bindings.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'Binding') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'v1') + self.assertEqual(k8s_obj.kind, 'Binding') + self.assertEqual(k8s_obj.api_version, 'v1') + # V1Binding + self.assertIsNotNone(k8s_obj.target) def test_controller_revision(self): k8s_objs = self.transfromer.get_k8s_objs_from_yaml( ['controller-revision.yaml'], self.yaml_path ) - self.assertIsNotNone(k8s_objs[0].get('object')) + k8s_obj = k8s_objs[0].get('object') + self.assertIsNotNone(k8s_obj) self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') - self.assertEqual(k8s_objs[0].get('object').kind, - 'ControllerRevision') - self.assertEqual(k8s_objs[0].get('object').api_version, - 'apps/v1') + self.assertEqual(k8s_obj.kind, 'ControllerRevision') + self.assertEqual(k8s_obj.api_version, 'apps/v1') + # V1ControllerRevision + self.assertEqual(k8s_obj.revision, 1) def test_transform(self): container_obj = tosca_kube_object.Container( diff --git a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py index 8b4a699df..99d8d9d0e 100644 --- a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py +++ b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py @@ -166,11 +166,10 @@ class Transformer(object): # initiating k8s object, you need to # give the must param an empty value. 
must_param = { - 'RuntimeRawExtension': '(raw="")', 'V1LocalSubjectAccessReview': '(spec="")', 'V1HTTPGetAction': '(port="")', 'V1DeploymentSpec': '(selector="", template="")', - 'V1PodSpec': '(containers="")', + 'V1PodSpec': '(containers=[])', 'V1ConfigMapKeySelector': '(key="")', 'V1Container': '(name="")', 'V1EnvVar': '(name="")', @@ -182,7 +181,7 @@ class Transformer(object): 'image="", image_id="", ' 'name="", ready="", ' 'restart_count="")', - 'V1ServicePort': '(port="")', + 'V1ServicePort': '(port=80)', 'V1TypedLocalObjectReference': '(kind="", name="")', 'V1LabelSelectorRequirement': '(key="", operator="")', 'V1PersistentVolumeClaimCondition': '(status="", type="")', @@ -237,14 +236,15 @@ class Transformer(object): 'V1ScopedResourceSelectorRequirement': '(operator="", scope_name="")', 'V1APIServiceSpec': '(group_priority_minimum=0, ' - 'service="", version_priority=0)', + 'service="", ' + 'version_priority=0)', 'V1APIServiceCondition': '(status="", type="")', 'V1DaemonSetSpec': '(selector="", template="")', 'V1ReplicaSetSpec': '(selector="")', 'V1StatefulSetSpec': '(selector="", ' 'service_name="", template="")', 'V1StatefulSetCondition': '(status="", type="")', - 'V1StatefulSetStatus': '(replicas="")', + 'V1StatefulSetStatus': '(replicas=0)', 'V1ControllerRevision': '(revision=0)', 'V1TokenReview': '(spec="")', 'V1SubjectAccessReviewStatus': '(allowed=True)', @@ -274,6 +274,48 @@ class Transformer(object): 'V1VolumeAttachmentSpec': '(attacher="", node_name="", source="")', 'V1VolumeAttachmentStatus': '(attached=True)', + 'V1NodeSelector': '(node_selector_terms=[])', + 'V1NodeSelectorRequirement': '(key="", operator="")', + 'V1PreferredSchedulingTerm': '(preference="", weight=1)', + 'V1PodAffinityTerm': '(topology_key="")', + 'V1WeightedPodAffinityTerm': '(pod_affinity_term="", weight=1)', + 'V1OwnerReference': '(api_version="", kind="", name="", uid="")', + 'V1HTTPHeader': '(name="", value="")', + 'V1TCPSocketAction': '(port="")', + 'V1VolumeDevice': '(device_path="", name="")', + 'V1PodReadinessGate': '(condition_type="")', + 'V1Sysctl': '(name="", value="")', + 'V1ContainerStateTerminated': '(exit_code=0)', + 'V1AzureFilePersistentVolumeSource': '(secret_name="",' + ' share_name="")', + 'V1CephFSPersistentVolumeSource': '(monitors=[])', + 'V1CinderPersistentVolumeSource': '(volume_id="")', + 'V1CSIPersistentVolumeSource': '(driver="", volume_handle="")', + 'V1FlexPersistentVolumeSource': '(driver="")', + 'V1GlusterfsPersistentVolumeSource': '(endpoints="", path="")', + 'V1ISCSIPersistentVolumeSource': '(iqn="", lun=0,' + ' target_portal="")', + 'V1LocalVolumeSource': '(path="")', + 'V1RBDPersistentVolumeSource': '(image="", monitors=[])', + 'V1ScaleIOPersistentVolumeSource': '(' + 'gateway="",' + ' secret_ref="",' + ' system="")', + 'V1DaemonSetStatus': '(current_number_scheduled=0, ' + 'desired_number_scheduled=0, ' + 'number_misscheduled=0, ' + 'number_ready=0)', + 'V1DaemonSetCondition': '(status="", type="")', + 'V1DeploymentCondition': '(status="", type="")', + 'V1ReplicaSetStatus': '(replicas=0)', + 'V1ReplicaSetCondition': '(status="", type="")', + 'V1ResourceRule': '(verbs=[])', + 'V1JobCondition': '(status="", type="")', + 'V1IPBlock': '(cidr="")', + 'V1EphemeralContainer': '(name="")', + 'V1TopologySpreadConstraint': '(max_skew=0, topology_key="",' + ' when_unsatisfiable="")', + 'V1LimitRangeItem': '(type="")' } whole_kind = 'V1' + kind if whole_kind in must_param.keys(): diff --git a/upper-constraints.txt b/upper-constraints.txt index f302608ab..5dcad3ffe 100644 --- 
a/upper-constraints.txt +++ b/upper-constraints.txt @@ -395,7 +395,7 @@ flux===1.3.5 python-solumclient===3.2.0 PyMySQL===1.0.2 uhashring===2.0 -kubernetes===12.0.1 +kubernetes===18.20.0 httplib2===0.19.0 betamax===0.8.1 construct===2.10.61
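
The `must_param` additions in `translate_outputs.py` above extend a pattern that is already in the file: each entry is the constructor-argument string appended to the `V1...` model class name (the in-diff comment notes that "initiating k8s object, you need to give the must param an empty value"), so that client models with required fields can be instantiated with placeholder values. Below is a minimal sketch of that pattern against the bumped kubernetes 18.20.0 client. It is illustrative only, not the exact Tacker implementation: the table is a reduced subset of the mapping from the patch, and `kind = 'PodSpec'` is a hypothetical input (in Tacker the kind comes from the parsed manifest).

from kubernetes import client

# Illustrative subset of the must_param table; each value is the argument
# string needed to satisfy that model's required constructor parameters.
must_param = {
    'V1PodSpec': '(containers=[])',
    'V1ServicePort': '(port=80)',
    'V1StatefulSetStatus': '(replicas=0)',
}

kind = 'PodSpec'              # hypothetical input for this sketch
whole_kind = 'V1' + kind

# Instantiate the model, passing required arguments only when listed above.
if whole_kind in must_param:
    k8s_obj = eval('client.' + whole_kind + must_param[whole_kind])
else:
    k8s_obj = eval('client.' + whole_kind + '()')

print(type(k8s_obj).__name__)  # -> V1PodSpec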