Implement CNF support with VNFM and CISM in Tacker

Implements: blueprint cnf-support-with-etsi-nfv-specs

CNF instantiation/termination:
* Load CNF definition files from the CSAR artifact
* Extend the Kubernetes infra_driver to support general Kubernetes APIs

Other changes:
* Update lower-constraints.txt and requirements.txt to
  satisfy our k8s client usage
  (kubernetes: 7.0.0->11.0.0, urllib3: 1.22->1.24.2)

Change-Id: I20ffbaec14ac5fb8236bd61416c2604b7a6590f6
(cherry picked from commit 3ad581fed2)
LiangLu 2020-08-19 01:45:15 -04:00 committed by Yasufumi Ogawa
parent 7e49a14139
commit 5f562fccfb
87 changed files with 6580 additions and 95 deletions


@ -47,7 +47,7 @@ jsonschema==3.2.0
keystoneauth1==3.15.0
keystonemiddleware==4.17.0
kombu==4.3.0
kubernetes==7.0.0
kubernetes==11.0.0
linecache2==1.0.0
Mako==1.0.7
MarkupSafe==1.1
@ -146,7 +146,7 @@ tooz==1.58.0
tosca-parser==1.6.0
traceback2==1.4.0
unittest2==1.1.0
urllib3==1.22
urllib3==1.24.2
vine==1.1.4
voluptuous==0.11.1
waitress==1.1.0


@ -48,7 +48,7 @@ pyroute2>=0.4.21;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2)
python-mistralclient!=3.2.0,>=3.1.0 # Apache-2.0
python-barbicanclient>=4.5.2 # Apache-2.0
castellan>=0.16.0 # Apache-2.0
kubernetes>=7.0.0 # Apache-2.0
kubernetes>=11.0.0 # Apache-2.0
setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=21.0.0 # PSF/ZPL
tooz>=1.58.0 # Apache-2.0
PyYAML>=5.1 # MIT


@ -120,7 +120,10 @@ keyvalue_pairs = {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9-_:. /]{1,255}$': {
'type': 'string', 'maxLength': 255
'anyOf': [
{'type': 'array'},
{'type': 'string', 'maxLength': 255}
]
}
},
'additionalProperties': False
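The relaxed pattern schema above means additional parameter values may now be lists as well as bounded strings. A minimal validation sketch using the jsonschema library; the lcm-kubernetes-def-files key is illustrative, not taken from this diff:

import jsonschema

keyvalue_pairs = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:. /]{1,255}$': {
            'anyOf': [
                {'type': 'array'},
                {'type': 'string', 'maxLength': 255}
            ]
        }
    },
    'additionalProperties': False
}

# A list value like this failed the old 'type': 'string' constraint;
# it now validates against the anyOf branch.
jsonschema.validate(
    {'lcm-kubernetes-def-files': ['Files/kubernetes/deployment.yaml']},
    keyvalue_pairs)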


@ -51,6 +51,31 @@ class KubernetesHTTPAPI(object):
k8s_client = api_client.ApiClient(configuration=config)
return k8s_client
def get_k8s_client_dict(self, auth):
k8s_client_dict = {
'v1': self.get_core_v1_api_client(auth),
'apiregistration.k8s.io/v1':
self.get_api_registration_v1_api_client(auth),
'apps/v1': self.get_app_v1_api_client(auth),
'authentication.k8s.io/v1':
self.get_authentication_v1_api_client(auth),
'authorization.k8s.io/v1':
self.get_authorization_v1_api_client(auth),
'autoscaling/v1': self.get_scaling_api_client(auth),
'batch/v1': self.get_batch_v1_api_client(auth),
'coordination.k8s.io/v1':
self.get_coordination_v1_api_client(auth),
'networking.k8s.io/v1':
self.get_networking_v1_api_client(auth),
'rbac.authorization.k8s.io/v1':
self.get_rbac_authorization_v1_api_client(auth),
'scheduling.k8s.io/v1':
self.get_scheduling_v1_api_client(auth),
'storage.k8s.io/v1':
self.get_storage_v1_api_client(auth)
}
return k8s_client_dict
def get_extension_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.ExtensionsV1beta1Api(api_client=k8s_client)
@ -71,6 +96,42 @@ class KubernetesHTTPAPI(object):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.AppsV1Api(api_client=k8s_client)
def get_api_registration_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.ApiregistrationV1Api(api_client=k8s_client)
def get_authentication_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.AuthenticationV1Api(api_client=k8s_client)
def get_authorization_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.AuthorizationV1Api(api_client=k8s_client)
def get_batch_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.BatchV1Api(api_client=k8s_client)
def get_coordination_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.CoordinationV1Api(api_client=k8s_client)
def get_networking_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.NetworkingV1Api(api_client=k8s_client)
def get_rbac_authorization_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.RbacAuthorizationV1Api(api_client=k8s_client)
def get_scheduling_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.SchedulingV1Api(api_client=k8s_client)
def get_storage_v1_api_client(self, auth):
k8s_client = self.get_k8s_client(auth_plugin=auth)
return client.StorageV1Api(api_client=k8s_client)
@staticmethod
def create_ca_cert_tmp_file(ca_cert):
file_descriptor, file_path = tempfile.mkstemp()
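For context, a minimal sketch of the dispatch pattern that get_k8s_client_dict() enables: mapping a manifest's apiVersion string to the matching kubernetes client class. The plain ApiClient() below is an assumption for brevity; the driver itself builds one per auth via get_k8s_client():

from kubernetes import client

# Simplified stand-in for the per-auth client the driver constructs.
api_client = client.ApiClient()

k8s_client_dict = {
    'v1': client.CoreV1Api(api_client=api_client),
    'apps/v1': client.AppsV1Api(api_client=api_client),
    'batch/v1': client.BatchV1Api(api_client=api_client),
}

# Dispatch on the apiVersion declared in a Kubernetes manifest.
apps_api = k8s_client_dict['apps/v1']  # e.g. for a Deployment object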


@ -139,6 +139,18 @@ class Invalid(TackerException):
message = _("Bad Request - Invalid Parameters")
class CreateApiFalse(TackerException):
message = _('Failed to create resource.')
class InitApiFalse(TackerException):
message = _('Failed to init resource.')
class ReadEndpoindsFalse(TackerException):
message = _('The method to read a resource failed.')
class InvalidInput(BadRequest):
message = _("Invalid input for operation: %(error_message)s.")


@ -222,7 +222,7 @@ class VnfInstantiatedInfo(model_base.BASE, models.SoftDeleteMixin,
vnf_virtual_link_resource_info = sa.Column(sa.JSON(), nullable=True)
virtual_storage_resource_info = sa.Column(sa.JSON(), nullable=True)
vnf_state = sa.Column(sa.String(255), nullable=False)
instance_id = sa.Column(sa.String(255), nullable=True)
instance_id = sa.Column(sa.Text(), nullable=True)
instantiation_level_id = sa.Column(sa.String(255), nullable=True)
additional_params = sa.Column(sa.JSON(), nullable=True)
@ -241,7 +241,7 @@ class VnfResource(model_base.BASE, models.SoftDeleteMixin,
vnf_instance_id = sa.Column(sa.String(36),
sa.ForeignKey('vnf_instances.id'),
nullable=False)
resource_name = sa.Column(sa.String(255), nullable=True)
resource_name = sa.Column(sa.Text(), nullable=True)
resource_type = sa.Column(sa.String(255), nullable=False)
resource_identifier = sa.Column(sa.String(255), nullable=False)
resource_status = sa.Column(sa.String(255), nullable=False)


@ -1 +1 @@
745e3e9fe5e2
aaf461c8844c


@ -0,0 +1,42 @@
# Copyright (C) 2020 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""change type for vnf_resources and vnf_instantiated_info table
Revision ID: aaf461c8844c
Revises: 745e3e9fe5e2
Create Date: 2020-09-17 03:17:42.570250
"""
# flake8: noqa: E402
# revision identifiers, used by Alembic.
revision = 'aaf461c8844c'
down_revision = '745e3e9fe5e2'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
op.alter_column('vnf_instantiated_info',
'instance_id',
type_=sa.Text(),
nullable=True)
op.alter_column('vnf_resources',
'resource_name',
type_=sa.Text(),
nullable=True)
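No downgrade is provided, as is usual for OpenStack migrations; purely for illustration, a hypothetical inverse (not part of this commit) would look like:

def downgrade(active_plugins=None, options=None):
    # Hypothetical only: restore the original VARCHAR(255) columns.
    # Values longer than 255 characters would be truncated.
    op.alter_column('vnf_instantiated_info',
                    'instance_id',
                    type_=sa.String(255),
                    nullable=True)
    op.alter_column('vnf_resources',
                    'resource_name',
                    type_=sa.String(255),
                    nullable=True)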


@ -100,6 +100,16 @@ class VNFDNotFound(exceptions.NotFound):
message = _('VNFD %(vnfd_id)s could not be found')
class CnfDefinitionNotFound(exceptions.NotFound):
message = _(
"CNF definition file with path %(path)s "
"is not found in vnf_artifacts.")
class CNFCreateWaitFailed(exceptions.TackerException):
message = _('CNF Create Failed with reason: %(reason)s')
class ServiceTypeNotFound(exceptions.NotFound):
message = _('service type %(service_type_id)s could not be found')


@ -207,8 +207,8 @@ class InstantiateVnfRequest(base.TackerObject):
'VimConnectionInfo', nullable=True, default=[]),
'ext_virtual_links': fields.ListOfObjectsField(
'ExtVirtualLinkData', nullable=True, default=[]),
'additional_params': fields.DictOfStringsField(nullable=True,
default={}),
'additional_params': fields.DictOfNullableField(nullable=True,
default={})
}
@classmethod
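With additional_params widened from DictOfStringsField to DictOfNullableField, an instantiate request can carry non-string values. A hypothetical request body sketch (key names illustrative only):

# The list value below is exactly what DictOfStringsField used to reject.
instantiate_request = {
    'flavourId': 'simple',
    'additionalParams': {
        'lcm-kubernetes-def-files': [
            'Files/kubernetes/deployment.yaml',
            'Files/kubernetes/service.yaml',
        ],
        'namespace': 'default',
    },
}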


@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: curry-endpoint-test001
namespace: curry-ns
labels:
role: my-curry
spec:
containers:
- image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
name: local
ports:
- containerPort: 8080


@ -0,0 +1,22 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml
Name: Files/images/cirros-0.4.0-x86_64-disk.img
Content-type: application/x-iso9066-image
Name: Scripts/install.sh
Content-Type: test-data
Algorithm: SHA-256
Hash: 27bbdb25d8f4ed6d07d6f6581b86515e8b2f0059b236ef7b6f50d6674b34f02a
Name: Scripts/install.sh
Content-Type: test-data
Algorithm: SHA-256
Hash: 27bbdb25d8f4ed6d07d6f6581b86515e8b2f0059b236ef7b6f50d6674b34f02a
Name: Files/kubernetes/deployment.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: e23cc3433835cea32ce790b4823313dc6d0744dce02e27b1b339c87ee993b8c2


@ -0,0 +1,10 @@
apiVersion: v1
kind: Binding
metadata:
name: curry-endpoint-test001
namespace: default
target:
apiVersion: v1
kind: Node
namespace: default
name: k8-worker2


@ -0,0 +1,29 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: curry-cluster-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: curry-cluster-sa
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: curry-cluster-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: curry-cluster-role
subjects:
- apiGroup: ""
kind: ServiceAccount
name: curry-cluster-sa
namespace: default


@ -0,0 +1,8 @@
apiVersion: v1
data:
param0: key1
param1: key2
kind: ConfigMap
metadata:
name: curry-test001
namespace: default


@ -0,0 +1,8 @@
apiVersion: apps/v1
kind: ControllerRevision
data:
raw: test
metadata:
name: curry-test001
namespace: default
revision: 1


@ -0,0 +1,19 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx
namespace: default
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx
ports:
- containerPort: 80


@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: curry-probe-test001
namespace: default
spec:
replicas: 1
selector:
matchLabels:
selector: curry-probe-test001
template:
metadata:
labels:
selector: curry-probe-test001
app: webserver
spec:
containers:
- name: nginx-liveness-probe
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
protocol: TCP
- image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
name: kuryr-demo-readiness-probe
ports:
- containerPort: 8080
protocol: TCP


@ -0,0 +1,13 @@
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: curry-hpa-vdu001
namespace: default
spec:
maxReplicas: 3
minReplicas: 1
scaleTargetRef:
apiVersion: extensions/v1beta1
kind: Deployment
name: curry-svc-vdu001
targetCPUUtilizationPercentage: 40


@ -0,0 +1,25 @@
apiVersion: batch/v1
kind: Job
metadata:
creationTimestamp: null
labels:
run: curryjob
name: curryjob
namespace: default
spec:
completions: 5
parallelism: 2
template:
metadata:
creationTimestamp: null
labels:
run: curryjob
spec:
containers:
- command: ["sh", "-c"]
args:
- echo CURRY
image: celebdor/kuryr-demo
name: curryjob
restartPolicy: OnFailure
status: {}


@ -0,0 +1,10 @@
apiVersion: "v1"
kind: "LimitRange"
metadata:
name: "limits"
namespace: default
spec:
limits:
- type: "Container"
defaultRequest:
cpu: "100m"


@ -0,0 +1,11 @@
apiVersion: authorization.k8s.io/v1
kind: LocalSubjectAccessReview
metadata:
namespace: default
spec:
user: curry-sa
resourceAttributes:
group: apps
resource: deployments
verb: create
namespace: default


@ -0,0 +1,8 @@
apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
name: curry-lease
namespace: default
spec:
holderIdentity: master
leaseDurationSeconds: 40


@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: all-deny
namespace: default
spec:
podSelector: {}
policyTypes:
- Egress
egress:
- ports:
- port: 53
protocol: UDP
- port: 53
protocol: TCP
to:
- namespaceSelector: {}


@ -0,0 +1,7 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: high-priority
value: 1000000
globalDefault: false
description: "Priority Class Test"


@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: curry


@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: curry-sc-pv
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 1Gi
hostPath:
path: /data/curry-sc-test
type: DirectoryOrCreate
persistentVolumeReclaimPolicy: Delete
storageClassName: curry-sc-local


@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: curry-sc-pv-0
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 1Gi
hostPath:
path: /data/curry-sc-test-1
type: DirectoryOrCreate
persistentVolumeReclaimPolicy: Delete
storageClassName: curry-sc-local


@ -0,0 +1,43 @@
apiVersion: v1
kind: PodTemplate
metadata:
name: curry-test001
namespace: default
template:
metadata:
labels:
app: webserver
scaling_name: SP1
spec:
containers:
- env:
- name: param0
valueFrom:
configMapKeyRef:
key: param0
name: curry-test001
- name: param1
valueFrom:
configMapKeyRef:
key: param1
name: curry-test001
image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
name: web-server
ports:
- containerPort: 8080
resources:
limits:
cpu: 500m
memory: 512M
requests:
cpu: 500m
memory: 512M
volumeMounts:
- name: curry-claim-volume
mountPath: /data
volumes:
- name: curry-claim-volume
persistentVolumeClaim:
claimName: curry-pv-claim
terminationGracePeriodSeconds: 0


@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
namespace: default
name: curry-endpoint-test001
spec:
containers:
- image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
name: web-server
ports:
- containerPort: 8080


@ -0,0 +1,51 @@
---
apiVersion: v1
kind: Service
metadata:
labels:
app: webserver
vdu_name: curry-svc-vdu001
name: curry-svc-vdu001-multiple
namespace: default
spec:
ports:
- name: "80"
port: 80
targetPort: 8080
selector:
app: webserver
type: ClusterIP
---
apiVersion: v1
data:
param0: a2V5MQ==
param1: a2V5Mg==
kind: Secret
metadata:
name: curry-sc-multiple
namespace: default
---
apiVersion: apps/v1
kind: ReplicaSet
metadata:
name: curry-replicaset-multiple
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: webserver
template:
metadata:
labels:
app: webserver
spec:
containers:
- image: nginx
name: nginx
env:
- name: param0
valueFrom:
secretKeyRef:
key: param0
name: curry-sc-multiple


@ -0,0 +1,11 @@
apiVersion: v1
kind: ResourceQuota
metadata:
name: curry-rq
namespace: default
spec:
hard:
cpu: "1000m"
memory: 2Gi
scopes:
- NotBestEffort


@ -0,0 +1,31 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: curry-role
namespace: default
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: curry-sa
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: curry-rolebinding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: curry-role
subjects:
- apiGroup: ""
kind: ServiceAccount
name: curry-sa
namespace: default


@ -0,0 +1,14 @@
---
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectAccessReview
spec:
resourceAttributes:
group: apps
resource: deployments
verb: create
namespace: default
---
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectRulesReview
spec:
namespace: default


@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: curry-ns-statefulset
namespace: default
spec:
selector:
matchLabels:
app: nginx
serviceName: "nginx"
replicas: 2
template:
metadata:
labels:
app: nginx
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes:
- ReadWriteOnce
storageClassName: "curry-sc-local"
resources:
requests:
storage: 1Gi


@ -0,0 +1,6 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: curry-sc-local
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate


@ -0,0 +1,37 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: curry-sc-pvc
namespace: default
spec:
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
storageClassName: my-storage-class
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: curry-sc-pv-1
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 2Gi
hostPath:
path: /data/curry-sc-test
type: DirectoryOrCreate
persistentVolumeReclaimPolicy: Delete
storageClassName: my-storage-class
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: my-storage-class
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate


@ -0,0 +1,9 @@
apiVersion: authorization.k8s.io/v1
kind: SubjectAccessReview
spec:
user: curry-sa
resourceAttributes:
group: apps
resource: deployments
verb: create
namespace: default


@ -0,0 +1,9 @@
apiVersion: authentication.k8s.io/v1
kind: TokenReview
metadata:
name: curry-tokenreview-test
spec:
# SA_TOKEN=$(kubectl describe secret $(kubectl get secrets |
# grep curry-sa | cut -f1 -d ' ') | grep -E '^token' |
# cut -f2 -d':' | tr -d '\t'); echo $SA_TOKEN
token: "<SA_TOKEN>"


@ -0,0 +1,139 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml
Name: Files/kubernetes/bindings.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 98df24e1d96ea034dbe14d3288c207e14cf2a674d67d251d351b49cd36e98c46
Name: Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: f808fee02df7230a0e3026f97d745569aba6653a78b043c89bf82d0ba95833bd
Name: Files/kubernetes/config-map.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: c6d71870559226244c47618ff4bfd59e9835c471dea2da84a136434f8f77ada0
Name: Files/kubernetes/controller-revision.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 4042352e0de6aa0ad28d44354bd8e0d62fc8e753c8f52b7edf69d2a7a25d8f8d
Name: Files/kubernetes/daemon-set.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: c0750df79c9ba2824b032b6a485764486b014021aa6dade5ef61f1c10569412f
Name: Files/kubernetes/deployment.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 6a40dfb06764394fb604ae807d1198bc2e2ee8aece3b9483dfde48e53f316a58
Name: Files/kubernetes/horizontal-pod-autoscaler.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: aa95058d04ef61159712e7c567220b3f5c275251d78b0000bc04575f23c55894
Name: Files/kubernetes/job.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: be7239275d10915eec462634247daf3b7f6a95b22c4027c614b2359688565931
Name: Files/kubernetes/limit-range.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 0cd1b42e0993471fed8b0876dcef8122b292aedf430a5ced6a028660a6aede9e
Name: Files/kubernetes/local-subject-access-review.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 01c4348cd59dd69667b92c76910043e067a69950078bea9479fc0a7bb09ff0e7
Name: Files/kubernetes/multiple_yaml_lease.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 03999b641569b3480c8d667b632c85c01ee707a93125343eee71b096181fa8c3
Name: Files/kubernetes/multiple_yaml_network-policy.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 98f8f8a055afe8e8ddfb26b02d938a457226e0a1afa03ef69623a734aec49295
Name: Files/kubernetes/multiple_yaml_priority-class.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 2b5aa46d52f29f0c5d82375a727ef15795d33f5c55c09fc7c3a8774ee713db1f
Name: Files/kubernetes/namespace.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: c2af464e4b1646da9d2e6ccfdc44cf744753459a001c3469135d04dbb56bb293
Name: Files/kubernetes/persistent-volume-0.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: a1e8fe505cb32672eb6d96c9b2e3178a6e0828aa41082c096f9fe29dc64f39f4
Name: Files/kubernetes/persistent-volume-1.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 934bb29d10c75053c244c9acb1cb259c4a5616dbe931a02da8072322aa76cabc
Name: Files/kubernetes/pod-template.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 5d4d3d399e04cdba1f9c691ac7e690e295ff02b7c935abae873b68a83a858c50
Name: Files/kubernetes/pod.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: a708dcf5ba4d3a7c675f18b71484a32b7e4446e80e57dcc3035b8a921c3f659d
Name: Files/kubernetes/replicaset_service_secret.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 8ed52e5e167890efd7fba29c748f717dff01d68b60ff9a06af178cbafdfdc765
Name: Files/kubernetes/resource-quota.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 0cf5e5b69f0752a8c9b5ebb09aee2dccf49d53b580c0c1cb260a95d7f92c7861
Name: Files/kubernetes/role_rolebinding_SA.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 5d67ef70773d1673c3a115ab0f2fe2efebc841acaeafad056444e23e23664bbc
Name: Files/kubernetes/self-subject-access-review_and_self-subject-rule-review.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 83bd9c40db8c798d0cab0e793a4b40a4ac7eca4fec4fba89ab4257d0f397db40
Name: Files/kubernetes/statefulset.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: d0beddd39f6808cb62094146778961b068871393df3474e0787145639a94f649
Name: Files/kubernetes/storage-class.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: ccde582b3c81019991a2753a73061f5954cf1fd5f5dfa2e4a0e2b4458b424cf5
Name: Files/kubernetes/storage-class_pv_pvc.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: edc5e4d0b6c8e0c7e0e9ce199aa2b36b95d36442ff3daf309fb46f784ad14722
Name: Files/kubernetes/subject-access-review.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: ef937e9c90c1cb6093092ba2043c11e353d572736b04f798a49b785049fec552
Name: Files/kubernetes/token-review.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 468d9d53a3125c5850c6473d324c94f00b91a1e3536d1a62c7c7eb80fd7aa6d2
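Each Hash entry above is the SHA-256 digest of the named artifact, as declared by the Algorithm field. A minimal sketch of the verification step (hypothetical helper, assuming the artifacts are on disk):

import hashlib

def artifact_digest(path):
    # Read the artifact and compute its SHA-256 hex digest.
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()

# Expected to match the manifest's Hash line for the intact artifact.
print(artifact_digest('Files/kubernetes/deployment.yaml'))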


@ -0,0 +1,6 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: curry-sc-local-1
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate


@ -0,0 +1,9 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml
Name: http://127.0.0.1:44380/storage-class-url.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: dedcaa9f6c51ef9fa92bad9b870f7fd91fd3f6680a4f7af17d6bf4ff7dd2016f

File diff suppressed because it is too large


@ -0,0 +1,942 @@
# Copyright (C) 2020 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kubernetes import client
CREATE_K8S_FALSE_VALUE = None
def fake_k8s_dict():
k8s_client_dict = {
'namespace': 'curryns',
'object': fake_k8s_obj()
}
return k8s_client_dict
def fake_k8s_obj():
return client.V1Deployment(
api_version='apps/v1',
kind='Deployment',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1DeploymentSpec(
replicas=2,
selector=client.V1LabelSelector(
match_labels={'app': 'webserver'}
),
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={'app': 'webserver',
'scaling_name': 'SP1'}
),
spec=client.V1PodSpec(
containers=[
client.V1Container(
env=[
client.V1EnvVar(
name='param0',
value_from=client.V1EnvVarSource(
config_map_key_ref=client.
V1ConfigMapKeySelector(
key='param0',
name='curry-test001'
)
)
),
client.V1EnvVar(
name='param1',
value_from=client.V1EnvVarSource(
config_map_key_ref=client.
V1ConfigMapKeySelector(
key='param1',
name='curry-test001'
)
)
)
],
image='celebdor/kuryr-demo',
image_pull_policy='IfNotPresent',
name='web-server',
ports=[
client.V1ContainerPort(
container_port=8080
)
],
resources=client.V1ResourceRequirements(
limits={
'cpu': '500m', 'memory': '512M'
},
requests={
'cpu': '500m', 'memory': '512M'
}
),
volume_mounts=[
client.V1VolumeMount(
name='curry-claim-volume',
mount_path='/data'
)
]
)
],
volumes=[
client.V1Volume(
name='curry-claim-volume',
persistent_volume_claim=client.
V1PersistentVolumeClaimVolumeSource(
claim_name='curry-pv-claim'
)
)
],
termination_grace_period_seconds=0
)
)
)
)
def fake_k8s_client_dict():
k8s_client_dict = {
'v1': client.CoreV1Api(),
'apiregistration.k8s.io/v1': client.ApiregistrationV1Api(),
'apps/v1': client.AppsV1Api(),
'authentication.k8s.io/v1': client.AuthenticationV1Api(),
'authorization.k8s.io/v1': client.AuthorizationV1Api(),
'autoscaling/v1': client.AutoscalingV1Api(),
'batch/v1': client.BatchV1Api(),
'coordination.k8s.io/v1': client.CoordinationV1Api(),
'networking.k8s.io/v1': client.NetworkingV1Api(),
'rbac.authorization.k8s.io/v1': client.RbacAuthorizationV1Api(),
'scheduling.k8s.io/v1': client.SchedulingV1Api(),
'storage.k8s.io/v1': client.StorageV1Api()
}
return k8s_client_dict
def fake_k8s_objs_node():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_node()
}
]
return objs
def fake_node():
return client.V1Node(
api_version='v1',
kind='Node',
metadata=client.V1ObjectMeta(
name='curry-node-test',
labels={'name': 'curry-node-test'}
),
status=client.V1NodeStatus(
conditions=[
client.V1NodeCondition(
status='True',
type='Ready'
)
]
)
)
def fake_k8s_objs_node_status_false():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_node_false()
}
]
return objs
def fake_node_false():
return client.V1Node(
api_version='v1',
kind='Node',
metadata=client.V1ObjectMeta(
name='curry-node-test',
labels={'name': 'curry-node-test'}
),
status=client.V1NodeStatus(
conditions=[
client.V1NodeCondition(
status='False',
type='Ready'
)
]
)
)
def fake_k8s_objs_pvc():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_pvc()
}
]
return objs
def fake_pvc():
return client.V1PersistentVolumeClaim(
api_version='v1',
kind='PersistentVolumeClaim',
metadata=client.V1ObjectMeta(
name='curry-sc-pvc'
),
status=client.V1PersistentVolumeClaimStatus(
phase='Bound'
)
)
def fake_k8s_objs_pvc_false_phase():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_pvc_false()
}
]
return objs
def fake_pvc_false():
return client.V1PersistentVolumeClaim(
api_version='v1',
kind='PersistentVolumeClaim',
metadata=client.V1ObjectMeta(
name='curry-sc-pvc'
),
status=client.V1PersistentVolumeClaimStatus(
phase='UnBound'
)
)
def fake_k8s_objs_namespace():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_namespace()
}
]
return objs
def fake_namespace():
return client.V1Namespace(
api_version='v1',
kind='Namespace',
metadata=client.V1ObjectMeta(
name='curry-ns'
),
status=client.V1NamespaceStatus(
phase='Active'
)
)
def fake_k8s_objs_namespace_false_phase():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_namespace_false()
}
]
return objs
def fake_namespace_false():
return client.V1Namespace(
api_version='v1',
kind='Namespace',
metadata=client.V1ObjectMeta(
name='curry-ns'
),
status=client.V1NamespaceStatus(
phase='NotActive'
)
)
def fake_k8s_objs_service():
objs = [
{
'namespace': 'default',
'status': 'Creating',
'object': fake_service()
}
]
return objs
def fake_service():
return client.V1Service(
api_version='v1',
kind='Service',
metadata=client.V1ObjectMeta(
labels={
'app': 'webserver',
'vdu_name': 'curry-svc-vdu001'
},
name='curry-svc-vdu001',
namespace='default'
),
spec=client.V1ServiceSpec(
cluster_ip=''
)
)
def fake_k8s_objs_service_false_cluster_ip():
objs = [
{
'namespace': 'default',
'status': 'Creating',
'object': fake_service_false()
}
]
return objs
def fake_service_false():
return client.V1Service(
api_version='v1',
kind='Service',
metadata=client.V1ObjectMeta(
labels={
'app': 'webserver',
'vdu_name': 'curry-svc-vdu001'
},
name='curry-svc-vdu001',
namespace='default'
),
spec=client.V1ServiceSpec(
cluster_ip='127.0.0.1'
)
)
def fake_endpoinds():
return client.V1Endpoints(
api_version='v1',
kind='Endpoints',
metadata=client.V1ObjectMeta(
namespace='default'
)
)
def fake_k8s_objs_deployment():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_deployment()
}
]
return obj
def fake_k8s_objs_deployment_error():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_deployment_error()
}
]
return obj
def fake_k8s_objs_replica_set():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_replica_set()
}
]
return obj
def fake_k8s_objs_replica_set_error():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_replica_set_error()
}
]
return obj
def fake_k8s_objs_stateful_set():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_stateful_set()
}
]
return obj
def fake_k8s_objs_stateful_set_error():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_stateful_set_error()
}
]
return obj
def fake_k8s_objs_job():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_job()
}
]
return obj
def fake_k8s_objs_job_error():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_job_error()
}
]
return obj
def fake_k8s_objs_volume_attachment():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_volume_attachment()
}
]
return obj
def fake_k8s_objs_volume_attachment_error():
obj = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_v1_volume_attachment_error()
}
]
return obj
def fake_v1_deployment():
return client.V1Deployment(
api_version='apps/v1',
kind='Deployment',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1DeploymentStatus(
replicas=1,
ready_replicas=1
)
)
def fake_v1_deployment_error():
return client.V1Deployment(
api_version='apps/v1',
kind='Deployment',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1DeploymentStatus(
replicas=2,
ready_replicas=1
)
)
def fake_v1_replica_set():
return client.V1ReplicaSet(
api_version='apps/v1',
kind='ReplicaSet',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1ReplicaSetStatus(
replicas=1,
ready_replicas=1
)
)
def fake_v1_replica_set_error():
return client.V1ReplicaSet(
api_version='apps/v1',
kind='ReplicaSet',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1ReplicaSetStatus(
replicas=2,
ready_replicas=1
)
)
def fake_v1_job():
return client.V1Job(
api_version='batch/v1',
kind='Job',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1JobSpec(
completions=1,
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1PodSpec(
hostname='job',
containers=['image']
)
)
),
status=client.V1JobStatus(
succeeded=1,
)
)
def fake_v1_job_error():
return client.V1Job(
api_version='batch/v1',
kind='Job',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1JobSpec(
completions=1,
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1PodSpec(
hostname='job',
containers=['image']
)
)
),
status=client.V1JobStatus(
succeeded=2,
)
)
def fake_v1_volume_attachment():
return client.V1VolumeAttachment(
api_version='storage.k8s.io/v1',
kind='VolumeAttachment',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1VolumeAttachmentSpec(
attacher='nginx',
node_name='nginx',
source=client.V1VolumeAttachmentSource(
persistent_volume_name='curry-sc-pvc'
)
),
status=client.V1VolumeAttachmentStatus(
attached=True,
)
)
def fake_v1_volume_attachment_error():
return client.V1VolumeAttachment(
api_version='storage.k8s.io/v1',
kind='VolumeAttachment',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1VolumeAttachmentSpec(
attacher='nginx',
node_name='nginx',
source=client.V1VolumeAttachmentSource(
persistent_volume_name='curry-sc-pvc'
)
),
status=client.V1VolumeAttachmentStatus(
attached=False,
)
)
def fake_v1_stateful_set():
return client.V1StatefulSet(
api_version='apps/v1',
kind='StatefulSet',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1StatefulSetSpec(
replicas=1,
volume_claim_templates=[
client.V1PersistentVolumeClaim(
metadata=client.V1ObjectMeta(
name='www'
)
)
],
selector=client.V1LabelSelector(
match_labels={'app': 'nginx'}
),
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
)
),
service_name='nginx'
),
status=client.V1StatefulSetStatus(
replicas=1,
ready_replicas=1
),
)
def fake_v1_stateful_set_error():
return client.V1StatefulSet(
api_version='apps/v1',
kind='StatefulSet',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
spec=client.V1StatefulSetSpec(
replicas=1,
volume_claim_templates=[
client.V1PersistentVolumeClaim(
metadata=client.V1ObjectMeta(
name='www'
)
)
],
selector=client.V1LabelSelector(
match_labels={'app': 'nginx'}
),
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
)
),
service_name='nginx'
),
status=client.V1StatefulSetStatus(
replicas=2,
ready_replicas=1
)
)
def fake_v1_persistent_volume_claim():
return client.V1PersistentVolumeClaim(
api_version='v1',
kind='PersistentVolumeClaim',
metadata=client.V1ObjectMeta(
name='www-curry-test001-0',
namespace='curryns'
),
status=client.V1PersistentVolumeClaimStatus(
phase='Bound'
)
)
def fake_v1_persistent_volume_claim_error():
return client.V1PersistentVolumeClaim(
api_version='v1',
kind='PersistentVolumeClaim',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1PersistentVolumeClaimStatus(
phase='Bound1'
)
)
def fake_k8s_objs_pod():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_pod()
}
]
return objs
def fake_k8s_objs_pod_error():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_pod_error()
}
]
return objs
def fake_pod():
return client.V1Pod(
api_version='v1',
kind='Pod',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1PodStatus(
phase='Running',
)
)
def fake_pod_error():
return client.V1Pod(
api_version='v1',
kind='Pod',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1PodStatus(
phase='Terminated',
)
)
def fake_k8s_objs_persistent_volume():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_persistent_volume()
}
]
return objs
def fake_k8s_objs_persistent_volume_error():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_persistent_volume_error()
}
]
return objs
def fake_persistent_volume():
return client.V1PersistentVolume(
api_version='v1',
kind='PersistentVolume',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1PersistentVolumeStatus(
phase='Available',
)
)
def fake_persistent_volume_error():
return client.V1PersistentVolume(
api_version='v1',
kind='PersistentVolume',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1PersistentVolumeStatus(
phase='UnBound',
)
)
def fake_k8s_objs_api_service():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_api_service()
}
]
return objs
def fake_k8s_objs_api_service_error():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_api_service_error()
}
]
return objs
def fake_api_service():
return client.V1APIService(
api_version='apiregistration.k8s.io/v1',
kind='APIService',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1APIServiceStatus(
conditions=[
client.V1APIServiceCondition(
type='Available',
status='True'
)
]
)
)
def fake_api_service_error():
return client.V1APIService(
api_version='apiregistration.k8s.io/v1',
kind='APIService',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1APIServiceStatus(
conditions=[
client.V1APIServiceCondition(
type='Unavailable',
status='True'
)
]
)
)
def fake_k8s_objs_daemon_set():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_daemon_set()
}
]
return objs
def fake_k8s_objs_daemon_set_error():
objs = [
{
'namespace': 'test',
'status': 'Creating',
'object': fake_daemon_set_error()
}
]
return objs
def fake_daemon_set():
return client.V1DaemonSet(
api_version='apps/v1',
kind='DaemonSet',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1DaemonSetStatus(
number_ready=13,
desired_number_scheduled=13,
current_number_scheduled=4,
number_misscheduled=2,
)
)
def fake_daemon_set_error():
return client.V1DaemonSet(
api_version='apps/v1',
kind='DaemonSet',
metadata=client.V1ObjectMeta(
name='curry-test001',
namespace='curryns'
),
status=client.V1DaemonSetStatus(
number_ready=13,
desired_number_scheduled=12,
current_number_scheduled=4,
number_misscheduled=2,
)
)


@ -0,0 +1,9 @@
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
name: v1beta1.currytest.k8s.io
spec:
group: currytest.k8s.io
groupPriorityMinimum: 17000
version: v1beta1
versionPriority: 5


@ -0,0 +1,10 @@
apiVersion: v1
kind: Binding
metadata:
name: curry-test001
namespace: curryns
target:
apiVersion: v1
kind: Node
namespace: curryns
name: curry-endpoint-test001


@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: curry-cluster-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: curry-cluster-role
subjects:
- apiGroup: ""
kind: ServiceAccount
name: curry-cluster-sa
namespace: default


@ -0,0 +1,8 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: curry-cluster-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list"]


@ -0,0 +1,8 @@
apiVersion: v1
data:
param0: key1
param1: key2
kind: ConfigMap
metadata:
name: curry-test001
namespace: curryns


@ -0,0 +1,6 @@
apiVersion: apps/v1
kind: ControllerRevision
metadata:
name: curry-test001
namespace: curryns
revision: 1


@ -0,0 +1,16 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx
spec:
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx


@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: curry-probe-test001
spec:
replicas: 1
selector:
matchLabels:
selector: curry-probe-test001
template:
metadata:
labels:
selector: curry-probe-test001
app: webserver
spec:
containers:
- image: nginx
imagePullPolicy: IfNotPresent
name: nginx-liveness-probe
ports:
- containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
port: 80
path: /
failureThreshold: 5
periodSeconds: 5
- image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
name: kuryr-demo-readiness-probe
ports:
- containerPort: 8080
protocol: TCP
readinessProbe:
httpGet:
port: 8080
path: /
failureThreshold: 2
periodSeconds: 2


@ -0,0 +1,13 @@
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: curry-svc-vdu001
namespace: default
spec:
maxReplicas: 3
minReplicas: 1
scaleTargetRef:
apiVersion: extensions/v1beta1
kind: Deployment
name: curry-svc-vdu001
targetCPUUtilizationPercentage: 40


@ -0,0 +1,27 @@
apiVersion: batch/v1
kind: Job
metadata:
creationTimestamp: null
labels:
run: curryjob
name: curryjob
spec:
completions: 5
parallelism: 2
template:
metadata:
creationTimestamp: null
labels:
run: curryjob
spec:
containers:
- command: ["sh", "-c"]
args:
- echo CURRY
image: celebdor/kuryr-demo
name: curryjob
resources:
limits: {}
requests: {}
restartPolicy: OnFailure
status: {}


@ -0,0 +1,8 @@
apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
name: curry-lease
namespace: default
spec:
holderIdentity: worker02
leaseDurationSeconds: 40


@ -0,0 +1,10 @@
apiVersion: v1
kind: LimitRange
metadata:
name: curry-test001
namespace: curryns
spec:
limits:
- default:
cpu: 500m
memory: 512M


@ -0,0 +1,11 @@
apiVersion: authorization.k8s.io/v1
kind: LocalSubjectAccessReview
metadata:
namespace: curry-ns
spec:
user: curry-sa
resourceAttributes:
group: apps
resource: deployments
verb: create
namespace: curry-ns


@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: curry-ns


@ -0,0 +1,8 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: all-deny
spec:
podSelector: {}
policyTypes:
- Egress


@ -0,0 +1,6 @@
apiVersion: v1
kind: Node
metadata:
name: curry-node-test
labels:
name: curry-node-test


@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: curry-sc-pvc
spec:
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
resources:
requests:
storage: 2Gi
storageClassName: curry-sc-local


@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: curry-sc-pv
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 2Gi
hostPath:
path: /data/curry-sc-test
type: DirectoryOrCreate
persistentVolumeReclaimPolicy: Delete
storageClassName: curry-sc-local


@ -0,0 +1,43 @@
apiVersion: v1
kind: PodTemplate
metadata:
name: curry-test001
namespace: curryns
template:
metadata:
labels:
app: webserver
scaling_name: SP1
spec:
containers:
- env:
- name: param0
valueFrom:
configMapKeyRef:
key: param0
name: curry-test001
- name: param1
valueFrom:
configMapKeyRef:
key: param1
name: curry-test001
image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
name: web-server
ports:
- containerPort: 8080
resources:
limits:
cpu: 500m
memory: 512M
requests:
cpu: 500m
memory: 512M
volumeMounts:
- name: curry-claim-volume
mountPath: /data
volumes:
- name: curry-claim-volume
persistentVolumeClaim:
claimName: curry-pv-claim
terminationGracePeriodSeconds: 0


@ -0,0 +1,11 @@
apiVersion: v1
kind: Pod
metadata:
name: curry-endpoint-test001
spec:
containers:
- image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
name: web-server
ports:
- containerPort: 8080


@ -0,0 +1,7 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: high-priority
value: 1000000
globalDefault: false
description: "Priority Class Test"


@ -0,0 +1,17 @@
apiVersion: apps/v1
kind: ReplicaSet
metadata:
name: curry-replicaset
spec:
replicas: 10
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: nginx
name: nginx


@ -0,0 +1,11 @@
apiVersion: v1
kind: ResourceQuota
metadata:
name: curry-rq
namespace: curryns
spec:
hard:
cpu: "1000m"
memory: 2Gi
scopes:
- NotBestEffort


@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: curry-rolebinding
namespace: curry-ns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: curry-role
subjects:
- apiGroup: ""
kind: ServiceAccount
name: curry-sa
namespace: default


@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: curry-role
namespace: curry-ns
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list"]


@ -0,0 +1,8 @@
apiVersion: v1
data:
param0: a2V5MQ==
param1: a2V5Mg==
kind: Secret
metadata:
name: curry-sc
namespace: default


@ -0,0 +1,8 @@
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectAccessReview
spec:
resourceAttributes:
group: apps
resource: deployments
verb: create
namespace: curry-ns


@ -0,0 +1,4 @@
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectRulesReview
spec:
namespace: curry-ns


@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: curry-cluster-sa
namespace: default


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: webserver
vdu_name: curry-svc-vdu001
name: curry-svc-vdu001
namespace: default
spec:
ports:
- name: http
port: 80
targetPort: 8080
selector:
app: webserver
type: ClusterIP


@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
spec:
selector:
matchLabels:
app: nginx
serviceName: "nginx"
replicas: 3
template:
metadata:
labels:
app: nginx
spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi


@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: curry-sc-local
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
#volumeBindingMode: Immediate
#reclaimPolicy: Retain


@ -0,0 +1,9 @@
apiVersion: authorization.k8s.io/v1
kind: SubjectAccessReview
spec:
user: curry-sa
resourceAttributes:
group: apps
resource: deployments
verb: create
namespace: curry-ns


@ -0,0 +1,9 @@
apiVersion: authentication.k8s.io/v1
kind: TokenReview
metadata:
name: curry-tokenreview-test
spec:
# SA_TOKEN=$(kubectl describe secret $(kubectl get secrets |
# grep curry-sa | cut -f1 -d ' ') | grep -E '^token' |
# cut -f2 -d':' | tr -d '\t'); echo $SA_TOKEN
token: "<SA_TOKEN>"


@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: VolumeAttachment
metadata:
name: curry-test001
namespace: curryns
spec:
attacher: nginx
node_name: nginx
source:
persistent_volume_name: curry-sc-pvc

File diff suppressed because it is too large


@ -0,0 +1,426 @@
# Copyright (C) 2020 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
from tacker.common import exceptions
from tacker.tests.unit import base
from tacker.tests.unit import fake_request
from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes
from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
class TestTransformer(base.TestCase):
def setUp(self):
super(TestTransformer, self).setUp()
self.yaml_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"kubernetes_api_resource/")
self.k8s_client_dict = fakes.fake_k8s_client_dict()
self.transfromer = translate_outputs.Transformer(
None, None, None, self.k8s_client_dict
)
def test_deploy_k8s_create_false(self):
kubernetes_objects = []
k8s_obj = fakes.fake_k8s_dict()
kubernetes_objects.append(k8s_obj)
self.assertRaises(exceptions.CreateApiFalse,
self.transfromer.deploy_k8s,
kubernetes_objects)
@mock.patch.object(translate_outputs.Transformer,
"_select_k8s_client_and_api")
def test_deploy_k8s(self, mock_k8s_client_and_api):
req = \
fake_request.HTTPRequest.blank(
'apis/apps/v1/namespaces/curryns/deployments')
mock_k8s_client_and_api.return_value = req
kubernetes_objects = []
k8s_obj = fakes.fake_k8s_dict()
kubernetes_objects.append(k8s_obj)
new_k8s_objs = self.transfromer.deploy_k8s(kubernetes_objects)
self.assertEqual(type(new_k8s_objs), list)
self.assertIsNotNone(new_k8s_objs)
self.assertEqual(new_k8s_objs[0]['status'], 'Creating')
def test_deployment(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['deployment.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'Deployment')
self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1')
def test_api_service(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['api-service.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'APIService')
self.assertEqual(k8s_objs[0].get('object').api_version,
'apiregistration.k8s.io/v1')
def test_cluster_role(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['cluster-role.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'ClusterRole')
self.assertEqual(k8s_objs[0].get('object').api_version,
'rbac.authorization.k8s.io/v1')
def test_cluster_role_binding(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['cluster-role-binding.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'ClusterRoleBinding')
self.assertEqual(k8s_objs[0].get('object').api_version,
'rbac.authorization.k8s.io/v1')
def test_config_map(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['config-map.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curryns')
self.assertEqual(k8s_objs[0].get('object').kind,
'ConfigMap')
self.assertEqual(k8s_objs[0].get('object').api_version, 'v1')
def test_daemon_set(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['daemon-set.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'DaemonSet')
self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1')
def test_horizontal_pod_autoscaler(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['horizontal-pod-autoscaler.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'default')
self.assertEqual(k8s_objs[0].get('object').kind,
'HorizontalPodAutoscaler')
self.assertEqual(k8s_objs[0].get('object').api_version,
'autoscaling/v1')
def test_job(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['job.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'Job')
self.assertEqual(k8s_objs[0].get('object').api_version,
'batch/v1')
def test_lease(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['lease.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'default')
self.assertEqual(k8s_objs[0].get('object').kind, 'Lease')
self.assertEqual(k8s_objs[0].get('object').api_version,
'coordination.k8s.io/v1')
def test_local_subject_access_review(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['local-subject-access-review.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns')
self.assertEqual(k8s_objs[0].get('object').kind,
'LocalSubjectAccessReview')
self.assertEqual(k8s_objs[0].get('object').api_version,
'authorization.k8s.io/v1')
def test_namespace(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['namespace.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'Namespace')
self.assertEqual(k8s_objs[0].get('object').api_version, 'v1')
def test_network_policy(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['network-policy.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'NetworkPolicy')
self.assertEqual(k8s_objs[0].get('object').api_version,
'networking.k8s.io/v1')
def test_node(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['node.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'Node')
self.assertEqual(k8s_objs[0].get('object').api_version, 'v1')
def test_persistent_volume(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['persistent-volume.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind, 'PersistentVolume')
self.assertEqual(k8s_objs[0].get('object').api_version, 'v1')
def test_persistent_volume_claim(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['persistent-volume-claim.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'PersistentVolumeClaim')
self.assertEqual(k8s_objs[0].get('object').api_version, 'v1')
def test_pod(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['pod.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'Pod')
self.assertEqual(k8s_objs[0].get('object').api_version, 'v1')
def test_priority_class(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['priority-class.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'PriorityClass')
self.assertEqual(k8s_objs[0].get('object').api_version,
'scheduling.k8s.io/v1')
def test_replica_set(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['replica-set.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'ReplicaSet')
self.assertEqual(k8s_objs[0].get('object').api_version,
'apps/v1')
def test_resource_quota(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['resource-quota.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curryns')
self.assertEqual(k8s_objs[0].get('object').kind,
'ResourceQuota')
self.assertEqual(k8s_objs[0].get('object').api_version,
'v1')
def test_role(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['role.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns')
self.assertEqual(k8s_objs[0].get('object').kind,
'Role')
self.assertEqual(k8s_objs[0].get('object').api_version,
'rbac.authorization.k8s.io/v1')
def test_role_binding(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['role-bindings.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns')
self.assertEqual(k8s_objs[0].get('object').kind,
'RoleBinding')
self.assertEqual(k8s_objs[0].get('object').api_version,
'rbac.authorization.k8s.io/v1')
def test_secret(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['secret.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'default')
self.assertEqual(k8s_objs[0].get('object').kind,
'Secret')
self.assertEqual(k8s_objs[0].get('object').api_version,
'v1')
def test_self_subject_access_review(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['self-subject-access-review.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'SelfSubjectAccessReview')
self.assertEqual(k8s_objs[0].get('object').api_version,
'authorization.k8s.io/v1')
def test_self_subject_rules_review(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['self-subject-rule-review.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'SelfSubjectRulesReview')
self.assertEqual(k8s_objs[0].get('object').api_version,
'authorization.k8s.io/v1')
def test_service(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['service.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'default')
self.assertEqual(k8s_objs[0].get('object').kind,
'Service')
self.assertEqual(k8s_objs[0].get('object').api_version,
'v1')
def test_service_account(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['service-account.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'default')
self.assertEqual(k8s_objs[0].get('object').kind,
'ServiceAccount')
self.assertEqual(k8s_objs[0].get('object').api_version,
'v1')
def test_stateful_set(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['stateful-set.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'StatefulSet')
self.assertEqual(k8s_objs[0].get('object').api_version,
'apps/v1')
def test_storage_class(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['storage-class.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'StorageClass')
self.assertEqual(k8s_objs[0].get('object').api_version,
'storage.k8s.io/v1')
def test_subject_access_review(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['subject-access-review.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'SubjectAccessReview')
self.assertEqual(k8s_objs[0].get('object').api_version,
'authorization.k8s.io/v1')
def test_token_review(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['token-review.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), '')
self.assertEqual(k8s_objs[0].get('object').kind,
'TokenReview')
self.assertEqual(k8s_objs[0].get('object').api_version,
'authentication.k8s.io/v1')
def test_limit_range(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['limit-range.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curryns')
self.assertEqual(k8s_objs[0].get('object').kind,
'LimitRange')
self.assertEqual(k8s_objs[0].get('object').api_version,
'v1')
def test_pod_template(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['pod-template.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curryns')
self.assertEqual(k8s_objs[0].get('object').kind,
'PodTemplate')
self.assertEqual(k8s_objs[0].get('object').api_version,
'v1')
def test_volume_attachment(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['volume-attachment.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curryns')
self.assertEqual(k8s_objs[0].get('object').kind,
'VolumeAttachment')
self.assertEqual(k8s_objs[0].get('object').api_version,
'storage.k8s.io/v1')
def test_bindings(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['bindings.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curryns')
self.assertEqual(k8s_objs[0].get('object').kind,
'Binding')
self.assertEqual(k8s_objs[0].get('object').api_version,
'v1')
def test_controller_revision(self):
k8s_objs = self.transfromer.get_k8s_objs_from_yaml(
['controller-revision.yaml'], self.yaml_path
)
self.assertIsNotNone(k8s_objs[0].get('object'))
self.assertEqual(k8s_objs[0].get('namespace'), 'curryns')
self.assertEqual(k8s_objs[0].get('object').kind,
'ControllerRevision')
self.assertEqual(k8s_objs[0].get('object').api_version,
'apps/v1')
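Each test above loads one single-document YAML fixture and asserts the parsed kind, namespace and api_version. As a rough sketch of what the parsing step consumes (the fixture content below is illustrative, not the actual test data):

import yaml

sample = """
apiVersion: apps/v1
kind: ControllerRevision
metadata:
  name: curry-revision
  namespace: curryns
revision: 1
"""

for doc in yaml.safe_load_all(sample):
    # get_k8s_objs_from_yaml() reads exactly these fields to fill the
    # 'object' and 'namespace' keys of each returned dict.
    print(doc['kind'], doc['apiVersion'], doc['metadata']['namespace'])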

View File

@ -17,7 +17,6 @@ import copy
import functools
import inspect
import six
import time
from oslo_config import cfg
from oslo_log import log as logging
@ -155,7 +154,9 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
vim_connection_info.vim_type, 'pre_instantiation_vnf',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info,
vnf_software_images=vnf_software_images)
vnf_software_images=vnf_software_images,
instantiate_vnf_req=instantiate_vnf_req,
vnf_package_path=vnf_package_path)
# save the vnf resources in the db
for _, resources in vnf_resources.items():
@ -275,23 +276,21 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
vim_connection_info, terminate_vnf_req=None,
update_instantiated_state=True):
if vnf_instance.instantiated_vnf_info and \
vnf_instance.instantiated_vnf_info.instance_id:
if (vnf_instance.instantiated_vnf_info and
vnf_instance.instantiated_vnf_info.instance_id) or \
vim_connection_info.vim_type == 'kubernetes':
instance_id = vnf_instance.instantiated_vnf_info.instance_id
instance_id = vnf_instance.instantiated_vnf_info.instance_id \
if vnf_instance.instantiated_vnf_info else None
access_info = vim_connection_info.access_info
LOG.info("Deleting stack %(instance)s for vnf %(id)s ",
{"instance": instance_id, "id": vnf_instance.id})
if terminate_vnf_req:
if (terminate_vnf_req.termination_type == 'GRACEFUL' and
terminate_vnf_req.graceful_termination_timeout > 0):
time.sleep(terminate_vnf_req.graceful_termination_timeout)
self._vnf_manager.invoke(vim_connection_info.vim_type,
'delete', plugin=self, context=context,
vnf_id=instance_id, auth_attr=access_info)
vnf_id=instance_id, auth_attr=access_info,
vnf_instance=vnf_instance, terminate_vnf_req=terminate_vnf_req)
if update_instantiated_state:
vnf_instance.instantiation_state = \
@ -300,7 +299,8 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
self._vnf_manager.invoke(vim_connection_info.vim_type,
'delete_wait', plugin=self, context=context,
vnf_id=instance_id, auth_attr=access_info)
vnf_id=instance_id, auth_attr=access_info,
vnf_instance=vnf_instance)
vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
context, vnf_instance.id)

View File

@ -13,10 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import toscaparser.utils.yamlparser
from urllib.parse import urlparse
import urllib.request as urllib2
import yaml
from kubernetes import client
from oslo_config import cfg
from oslo_log import log as logging
import toscaparser.utils.yamlparser
from tacker.common import exceptions
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -28,16 +36,85 @@ WHITE_SPACE_CHARACTER = ' '
NON_WHITE_SPACE_CHARACTER = ''
HYPHEN_CHARACTER = '-'
DASH_CHARACTER = '_'
# Due to the dependency order of k8s resource creation, resources whose
# kind is not listed in self.RESOURCE_CREATION_ORDER are created in a
# fixed slot relative to the listed kinds. This number marks that slot:
# when multiple resources are created, the unlisted ones are inserted
# right after the Service entries (index 8 in the order below).
OTHER_RESOURCE_SORT_POSITION = 8
class Transformer(object):
"""Transform TOSCA template to Kubernetes resources"""
def __init__(self, core_v1_api_client, app_v1_api_client,
scaling_api_client):
scaling_api_client, k8s_client_dict):
# the old params used when creating a vnf with a TOSCA template
self.core_v1_api_client = core_v1_api_client
self.app_v1_api_client = app_v1_api_client
self.scaling_api_client = scaling_api_client
# the new param used when instantiating a vnf with additionalParams
self.k8s_client_dict = k8s_client_dict
self.RESOURCE_CREATION_ORDER = [
'StorageClass',
'PersistentVolume',
'PriorityClass',
'Namespace',
'LimitRange',
'ResourceQuota',
'HorizontalPodAutoscaler',
'NetworkPolicy',
'Service',
'Endpoints',
'PersistentVolumeClaim',
'ConfigMap',
'Secret',
'StatefulSet',
'Job',
'Deployment',
'DaemonSet',
'Pod'
]
self.method_value = {
"Pod": 'create_namespaced_pod',
"Service": 'create_namespaced_service',
"ConfigMap": 'create_namespaced_config_map',
"Secret": 'create_namespaced_secret',
"PersistentVolumeClaim":
'create_namespaced_persistent_volume_claim',
"LimitRange": 'create_namespaced_limit_range',
"PodTemplate": 'create_namespaced_pod_template',
"Binding": 'create_namespaced_binding',
"Namespace": 'create_namespace',
"Node": 'create_node',
"PersistentVolume": 'create_persistent_volume',
"ResourceQuota": 'create_namespaced_resource_quota',
"ServiceAccount": 'create_namespaced_service_account',
"APIService": 'create_api_service',
"DaemonSet": 'create_namespaced_daemon_set',
"Deployment": 'create_namespaced_deployment',
"ReplicaSet": 'create_namespaced_replica_set',
"StatefulSet": 'create_namespaced_stateful_set',
"ControllerRevision": 'create_namespaced_controller_revision',
"TokenReview": 'create_token_review',
"LocalSubjectAccessReview": 'create_namespaced_local_'
'subject_access_review',
"SelfSubjectAccessReview": 'create_self_subject_access_review',
"SelfSubjectRulesReview": 'create_self_subject_rules_review',
"SubjectAccessReview": 'create_subject_access_review',
"HorizontalPodAutoscaler": 'create_namespaced_horizontal_'
'pod_autoscaler',
"Job": 'create_namespaced_job',
"Lease": 'create_namespaced_lease',
"NetworkPolicy": 'create_namespaced_network_policy',
"ClusterRole": 'create_cluster_role',
"ClusterRoleBinding": 'create_cluster_role_binding',
"Role": 'create_namespaced_role',
"RoleBinding": 'create_namespaced_role_binding',
"PriorityClass": 'create_priority_class',
"StorageClass": 'create_storage_class',
"VolumeAttachment": 'create_volume_attachment',
}
def transform(self, tosca_kube_objects):
"""transform function translates from tosca_kube_object to
@ -76,12 +153,193 @@ class Transformer(object):
kubernetes_objects['objects'].append(hpa_object)
# translate to Service object
service_object = self.init_service(tosca_kube_obj=tosca_kube_obj,
kube_obj_name=new_kube_obj_name)
service_object = self.init_service(
tosca_kube_obj=tosca_kube_obj,
kube_obj_name=new_kube_obj_name)
kubernetes_objects['objects'].append(service_object)
return kubernetes_objects
def _create_k8s_object(self, kind, file_content_dict):
# must_param lists required constructor arguments, referring to the
# K8s official object docs,
# e.g.: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Service.md
# When initiating a k8s object, each required param must be given
# an empty placeholder value.
must_param = {
'RuntimeRawExtension': '(raw="")',
'V1LocalSubjectAccessReview': '(spec="")',
'V1HTTPGetAction': '(port="")',
'V1DeploymentSpec': '(selector="", template="")',
'V1PodSpec': '(containers="")',
'V1ConfigMapKeySelector': '(key="")',
'V1Container': '(name="")',
'V1EnvVar': '(name="")',
'V1SecretKeySelector': '(key="")',
'V1ContainerPort': '(container_port="")',
'V1VolumeMount': '(mount_path="", name="")',
'V1PodCondition': '(status="", type="")',
'V1ContainerStatus': '('
'image="", image_id="", '
'name="", ready="", '
'restart_count="")',
'V1ServicePort': '(port="")',
'V1TypedLocalObjectReference': '(kind="", name="")',
'V1LabelSelectorRequirement': '(key="", operator="")',
'V1PersistentVolumeClaimCondition': '(status="", type="")',
'V1AWSElasticBlockStoreVolumeSource': '(volume_id="")',
'V1AzureDiskVolumeSource': '(disk_name="", disk_uri="")',
'V1AzureFileVolumeSource': '(secret_name="", share_name="")',
'V1CephFSVolumeSource': '(monitors=[])',
'V1CinderVolumeSource': '(volume_id="")',
'V1KeyToPath': '(key="", path="")',
'V1CSIVolumeSource': '(driver="")',
'V1DownwardAPIVolumeFile': '(path="")',
'V1ObjectFieldSelector': '(field_path="")',
'V1ResourceFieldSelector': '(resource="")',
'V1FlexVolumeSource': '(driver="")',
'V1GCEPersistentDiskVolumeSource': '(pd_name="")',
'V1GitRepoVolumeSource': '(repository="")',
'V1GlusterfsVolumeSource': '(endpoints="", path="")',
'V1HostPathVolumeSource': '(path="")',
'V1ISCSIVolumeSource': '(iqn="", lun=0, target_portal="")',
'V1Volume': '(name="")',
'V1NFSVolumeSource': '(path="", server="")',
'V1PersistentVolumeClaimVolumeSource': '(claim_name="")',
'V1PhotonPersistentDiskVolumeSource': '(pd_id="")',
'V1PortworxVolumeSource': '(volume_id="")',
'V1ProjectedVolumeSource': '(sources=[])',
'V1ServiceAccountTokenProjection': '(path="")',
'V1QuobyteVolumeSource': '(registry="", volume="")',
'V1RBDVolumeSource': '(image="", monitors=[])',
'V1ScaleIOVolumeSource': '('
'gateway="", secret_ref="", '
'system="")',
'V1VsphereVirtualDiskVolumeSource': '(volume_path="")',
'V1LimitRangeSpec': '(limits=[])',
'V1Binding': '(target="")',
'V1ComponentCondition': '(status="", type="")',
'V1NamespaceCondition': '(status="", type="")',
'V1ConfigMapNodeConfigSource': '(kubelet_config_key="", '
'name="", namespace="")',
'V1Taint': '(effect="", key="")',
'V1NodeAddress': '(address="", type="")',
'V1NodeCondition': '(status="", type="")',
'V1DaemonEndpoint': '(port=0)',
'V1ContainerImage': '(names=[])',
'V1NodeSystemInfo': '(architecture="", boot_id="", '
'container_runtime_version="",'
'kernel_version="", '
'kube_proxy_version="", '
'kubelet_version="",'
'machine_id="", operating_system="", '
'os_image="", system_uuid="")',
'V1AttachedVolume': '(device_path="", name="")',
'V1ScopedResourceSelectorRequirement':
'(operator="", scope_name="")',
'V1APIServiceSpec': '(group_priority_minimum=0, '
'service="", version_priority=0)',
'V1APIServiceCondition': '(status="", type="")',
'V1DaemonSetSpec': '(selector="", template="")',
'V1ReplicaSetSpec': '(selector="")',
'V1StatefulSetSpec': '(selector="", '
'service_name="", template="")',
'V1StatefulSetCondition': '(status="", type="")',
'V1StatefulSetStatus': '(replicas="")',
'V1ControllerRevision': '(revision=0)',
'V1TokenReview': '(spec="")',
'V1SubjectAccessReviewStatus': '(allowed=True)',
'V1SelfSubjectAccessReview': '(spec="")',
'V1SelfSubjectRulesReview': '(spec="")',
'V1SubjectRulesReviewStatus': '(incomplete=True, '
'non_resource_rules=[], '
'resource_rules=[])',
'V1NonResourceRule': '(verbs=[])',
'V1SubjectAccessReview': '(spec="")',
'V1HorizontalPodAutoscalerSpec':
'(max_replicas=0, scale_target_ref="")',
'V1CrossVersionObjectReference': '(kind="", name="")',
'V1HorizontalPodAutoscalerStatus':
'(current_replicas=0, desired_replicas=0)',
'V1JobSpec': '(template="")',
'V1NetworkPolicySpec': '(pod_selector="")',
'V1PolicyRule': '(verbs=[])',
'V1ClusterRoleBinding': '(role_ref="")',
'V1RoleRef': '(api_group="", kind="", name="")',
'V1Subject': '(kind="", name="")',
'V1RoleBinding': '(role_ref="")',
'V1PriorityClass': '(value=0)',
'V1StorageClass': '(provisioner="")',
'V1TopologySelectorLabelRequirement': '(key="", values=[])',
'V1VolumeAttachment': '(spec="")',
'V1VolumeAttachmentSpec':
'(attacher="", node_name="", source="")',
'V1VolumeAttachmentStatus': '(attached=True)',
}
whole_kind = 'V1' + kind
if whole_kind in must_param.keys():
k8s_obj = eval('client.V1' + kind + must_param.get(whole_kind))
else:
k8s_obj = eval('client.V1' + kind + '()')
self._init_k8s_obj(k8s_obj, file_content_dict, must_param)
return k8s_obj
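The eval() above builds the generated client model by class name; for kinds whose constructors declare required arguments, must_param supplies placeholder values. A minimal non-eval sketch of the same idea using getattr (the two required-arg entries are copied from must_param; everything else here is illustrative):

from kubernetes import client

def build_empty_model(kind):
    # look the V1<Kind> class up on the client module by name
    cls = getattr(client, 'V1' + kind)
    required_dummies = {
        'V1Binding': {'target': ''},
        'V1TokenReview': {'spec': ''},
    }
    return cls(**required_dummies.get('V1' + kind, {}))

pod = build_empty_model('Pod')          # V1Pod() takes no required args
binding = build_empty_model('Binding')  # V1Binding(target=...) does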
def get_k8s_objs_from_yaml(self, artifact_files, vnf_package_path):
k8s_objs = []
for artifact_file in artifact_files:
if ((urlparse(artifact_file).scheme == 'file') or
(bool(urlparse(artifact_file).scheme) and
bool(urlparse(artifact_file).netloc))):
file_content = urllib2.urlopen(artifact_file).read()
else:
artifact_file_path = os.path.join(
vnf_package_path, artifact_file)
with open(artifact_file_path, 'r') as f:
file_content = f.read()
file_content_dicts = list(yaml.safe_load_all(file_content))
for file_content_dict in file_content_dicts:
k8s_obj = {}
kind = file_content_dict.get('kind', '')
try:
k8s_obj['object'] = self._create_k8s_object(
kind, file_content_dict)
except Exception as e:
if isinstance(e, client.rest.ApiException):
msg = \
_('{kind} create failure. Reason={reason}'.format(
kind=file_content_dict.get('kind', ''),
reason=e.body))
else:
msg = \
_('{kind} create failure. Reason={reason}'.format(
kind=file_content_dict.get('kind', ''),
reason=e))
LOG.error(msg)
raise exceptions.InitApiFalse(error=msg)
if not file_content_dict.get('metadata', ''):
k8s_obj['namespace'] = ''
elif file_content_dict.get('metadata', '').\
get('namespace', ''):
k8s_obj['namespace'] = \
file_content_dict.get('metadata', '').get(
'namespace', '')
else:
k8s_obj['namespace'] = ''
k8s_objs.append(k8s_obj)
return k8s_objs
def _select_k8s_client_and_api(
self, kind, namespace, api_version, body):
k8s_client_obj = self.k8s_client_dict[api_version]
if 'namespaced' in self.method_value[kind]:
response = getattr(k8s_client_obj, self.method_value.get(kind))(
namespace=namespace, body=body
)
else:
response = getattr(k8s_client_obj, self.method_value.get(kind))(
body=body
)
return response
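The dispatch above relies on the generated client's naming convention: namespaced create methods carry 'namespaced' in their method name. A tiny sketch of that check in isolation:

method_value = {'Deployment': 'create_namespaced_deployment',
                'StorageClass': 'create_storage_class'}

def is_namespaced(kind):
    # mirrors the branch in _select_k8s_client_and_api
    return 'namespaced' in method_value[kind]

assert is_namespaced('Deployment')        # called with namespace=...
assert not is_namespaced('StorageClass')  # cluster-scoped, body only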
def deploy(self, kubernetes_objects):
"""Deploy Kubernetes objects on Kubernetes VIM and return
@ -107,7 +365,7 @@ class Transformer(object):
LOG.debug('Successfully created Deployment %s',
k8s_object.metadata.name)
elif object_type == 'HorizontalPodAutoscaler':
self.scaling_api_client.\
self.scaling_api_client. \
create_namespaced_horizontal_pod_autoscaler(
namespace=namespace,
body=k8s_object)
@ -127,6 +385,107 @@ class Transformer(object):
# namespace1,deployment1,namespace2,deployment2,namespace3,deployment3
return ",".join(deployment_names)
def deploy_k8s(self, kubernetes_objects):
"""Deploy kubernetes
Deploy Kubernetes objects on a Kubernetes VIM and return the
objects annotated with their creation status
"""
kubernetes_objects = self._sort_k8s_obj(kubernetes_objects)
new_k8s_objs = list()
for kubernetes_object in kubernetes_objects:
namespace = kubernetes_object.get('namespace', '')
kind = kubernetes_object.get('object', '').kind
api_version = kubernetes_object.get('object', '').api_version
body = kubernetes_object.get('object', '')
if kubernetes_object.get('object', '').metadata:
name = kubernetes_object.get('object', '').metadata.name
else:
name = ''
try:
LOG.debug("{kind} begin create.".format(kind=kind))
self._select_k8s_client_and_api(
kind, namespace, api_version, body)
kubernetes_object['status'] = 'Creating'
except Exception as e:
if isinstance(e, client.rest.ApiException):
kubernetes_object['status'] = 'creating_failed'
msg = '''The request to create a resource failed.
namespace: {namespace}, name: {name}, kind: {kind},
Reason: {exception}'''.format(
namespace=namespace, name=name, kind=kind,
exception=e.body
)
else:
kubernetes_object['status'] = 'creating_failed'
msg = '''The request to create a resource failed.
namespace: {namespace}, name: {name}, kind: {kind},
Reason: {exception}'''.format(
namespace=namespace, name=name, kind=kind,
exception=e
)
LOG.error(msg)
raise exceptions.CreateApiFalse(error=msg)
new_k8s_objs.append(kubernetes_object)
return new_k8s_objs
def _get_lower_case_name(self, name):
name = name.strip()
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
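This helper is what lets camelCase manifest keys line up with the snake_case attribute names of the generated client models; a few illustrative conversions:

import re

def to_snake(name):
    # same regex as _get_lower_case_name
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name.strip()).lower()

assert to_snake('apiVersion') == 'api_version'
assert to_snake('hostPath') == 'host_path'
assert to_snake('containerPort') == 'container_port'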
def _init_k8s_obj(self, obj, content, must_param):
for key, value in content.items():
param_value = self._get_lower_case_name(key)
if hasattr(obj, param_value) and \
not isinstance(value, dict) and \
not isinstance(value, list):
setattr(obj, param_value, value)
elif isinstance(value, dict):
obj_name = obj.openapi_types.get(param_value)
if obj_name == 'dict(str, str)':
setattr(obj, param_value, value)
else:
if obj_name in must_param.keys():
rely_obj = eval('client.' + obj_name +
must_param.get(obj_name))
else:
rely_obj = eval('client.' + obj_name + '()')
self._init_k8s_obj(rely_obj, value, must_param)
setattr(obj, param_value, rely_obj)
elif isinstance(value, list):
obj_name = obj.openapi_types.get(param_value)
if obj_name == 'list[str]':
setattr(obj, param_value, value)
else:
rely_objs = []
rely_obj_name = \
re.findall(r".*\[([^\[\]]*)\].*", obj_name)[0]
for v in value:
if rely_obj_name in must_param.keys():
rely_obj = eval('client.' + rely_obj_name +
must_param.get(rely_obj_name))
else:
rely_obj = \
eval('client.' + rely_obj_name + '()')
self._init_k8s_obj(rely_obj, v, must_param)
rely_objs.append(rely_obj)
setattr(obj, param_value, rely_objs)
def _sort_k8s_obj(self, k8s_objs):
pos = 0
objs = k8s_objs
sorted_k8s_objs = list()
for sort_index, kind in enumerate(self.RESOURCE_CREATION_ORDER):
# iterate over a copy: popping from the list while enumerating it
# would skip the element that shifts into the freed slot
for obj in list(objs):
if obj["object"].kind == kind:
sorted_k8s_objs.append(obj)
objs.remove(obj)
if sort_index == OTHER_RESOURCE_SORT_POSITION:
pos = len(sorted_k8s_objs)
for obj in objs:
sorted_k8s_objs.insert(pos, obj)
return sorted_k8s_objs
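A small worked sketch of the ordering, on bare kind strings rather than parsed objects (the input kinds are illustrative): listed kinds come out in RESOURCE_CREATION_ORDER order, and the unlisted Role lands in the slot recorded when sort_index hits OTHER_RESOURCE_SORT_POSITION, i.e. right after the Service entries:

ORDER = ['StorageClass', 'PersistentVolume', 'PriorityClass', 'Namespace',
         'LimitRange', 'ResourceQuota', 'HorizontalPodAutoscaler',
         'NetworkPolicy', 'Service', 'Endpoints', 'PersistentVolumeClaim',
         'ConfigMap', 'Secret', 'StatefulSet', 'Job', 'Deployment',
         'DaemonSet', 'Pod']

def sort_kinds(kinds, other_pos=8):
    pos, out, rest = 0, [], list(kinds)
    for sort_index, kind in enumerate(ORDER):
        out.extend(k for k in rest if k == kind)
        rest = [k for k in rest if k != kind]
        if sort_index == other_pos:
            pos = len(out)
    for k in rest:
        out.insert(pos, k)
    return out

print(sort_kinds(['Pod', 'Role', 'Service', 'Namespace']))
# -> ['Namespace', 'Service', 'Role', 'Pod']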
# config_labels configures label
def config_labels(self, deployment_name=None, scaling_name=None):
label = dict()

View File

@ -13,7 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import time
import urllib.request as urllib2
import yaml
from kubernetes import client
@ -23,13 +26,23 @@ from oslo_serialization import jsonutils
from tacker._i18n import _
from tacker.common.container import kubernetes_utils
from tacker.common import exceptions
from tacker.common import log
from tacker.common import utils
from tacker.extensions import vnfm
from tacker import objects
from tacker.objects import vnf_package as vnf_package_obj
from tacker.objects import vnf_package_vnfd as vnfd_obj
from tacker.objects import vnf_resources as vnf_resource_obj
from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.infra_drivers import abstract_driver
from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
from tacker.vnfm.infra_drivers.kubernetes import translate_template
from tacker.vnfm.infra_drivers import scale_driver
from urllib.parse import urlparse
CNF_TARGET_FILES_KEY = 'lcm-kubernetes-def-files'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -68,6 +81,21 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
self.STACK_RETRIES = cfg.CONF.kubernetes_vim.stack_retries
self.STACK_RETRY_WAIT = cfg.CONF.kubernetes_vim.stack_retry_wait
self.kubernetes = kubernetes_utils.KubernetesHTTPAPI()
self.CHECK_DICT_KEY = [
"Pod",
"Service",
"PersistentVolumeClaim",
"Namespace",
"Node",
"PersistentVolume",
"APIService",
"DaemonSet",
"Deployment",
"ReplicaSet",
"StatefulSet",
"Job",
"VolumeAttachment"
]
def get_type(self):
return 'kubernetes'
@ -116,61 +144,393 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
from Pod objects is RUNNING.
"""
# initialize Kubernetes APIs
auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
try:
core_v1_api_client = \
self.kubernetes.get_core_v1_api_client(auth=auth_cred)
deployment_info = vnf_id.split(COMMA_CHARACTER)
mgmt_ips = dict()
pods_information = self._get_pods_information(
core_v1_api_client=core_v1_api_client,
deployment_info=deployment_info)
status = self._get_pod_status(pods_information)
stack_retries = self.STACK_RETRIES
error_reason = None
while status == 'Pending' and stack_retries > 0:
time.sleep(self.STACK_RETRY_WAIT)
pods_information = \
self._get_pods_information(
core_v1_api_client=core_v1_api_client,
deployment_info=deployment_info)
if '{' not in vnf_id and '}' not in vnf_id:
auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
try:
core_v1_api_client = \
self.kubernetes.get_core_v1_api_client(auth=auth_cred)
deployment_info = vnf_id.split(COMMA_CHARACTER)
mgmt_ips = dict()
pods_information = self._get_pods_information(
core_v1_api_client=core_v1_api_client,
deployment_info=deployment_info)
status = self._get_pod_status(pods_information)
LOG.debug('status: %s', status)
stack_retries = stack_retries - 1
stack_retries = self.STACK_RETRIES
error_reason = None
while status == 'Pending' and stack_retries > 0:
time.sleep(self.STACK_RETRY_WAIT)
pods_information = \
self._get_pods_information(
core_v1_api_client=core_v1_api_client,
deployment_info=deployment_info)
status = self._get_pod_status(pods_information)
LOG.debug('status: %s', status)
stack_retries = stack_retries - 1
LOG.debug('VNF initializing status: %(service_name)s %(status)s',
{'service_name': str(deployment_info), 'status': status})
if stack_retries == 0 and status != 'Running':
error_reason = _("Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.STACK_RETRIES *
self.STACK_RETRY_WAIT),
stack=vnf_id)
LOG.warning("VNF Creation failed: %(reason)s",
{'reason': error_reason})
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
elif stack_retries != 0 and status != 'Running':
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
LOG.debug('VNF initializing status: %(service_name)s '
'%(status)s',
{'service_name': str(deployment_info),
'status': status})
if stack_retries == 0 and status != 'Running':
error_reason = _(
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(
self.STACK_RETRIES *
self.STACK_RETRY_WAIT),
stack=vnf_id)
LOG.warning("VNF Creation failed: %(reason)s",
{'reason': error_reason})
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
elif stack_retries != 0 and status != 'Running':
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
for i in range(0, len(deployment_info), 2):
namespace = deployment_info[i]
deployment_name = deployment_info[i + 1]
service_info = core_v1_api_client.read_namespaced_service(
name=deployment_name,
namespace=namespace)
if service_info.metadata.labels.get("management_connection"):
vdu_name = service_info.metadata.labels.\
get("vdu_name").split("-")[1]
mgmt_ip = service_info.spec.cluster_ip
mgmt_ips.update({vdu_name: mgmt_ip})
vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes(
mgmt_ips)
for i in range(0, len(deployment_info), 2):
namespace = deployment_info[i]
deployment_name = deployment_info[i + 1]
service_info = core_v1_api_client.read_namespaced_service(
name=deployment_name,
namespace=namespace)
if service_info.metadata.labels.get(
"management_connection"):
vdu_name = service_info.metadata.labels.\
get("vdu_name").split("-")[1]
mgmt_ip = service_info.spec.cluster_ip
mgmt_ips.update({vdu_name: mgmt_ip})
vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes(
mgmt_ips)
except Exception as e:
LOG.error('Creating wait VNF got an error due to %s', e)
raise
finally:
self.clean_authenticate_vim(auth_cred, file_descriptor)
def create_wait_k8s(self, k8s_objs, k8s_client_dict, vnf_instance):
try:
time.sleep(self.STACK_RETRY_WAIT)
keep_going = True
stack_retries = self.STACK_RETRIES
while keep_going and stack_retries > 0:
for k8s_obj in k8s_objs:
kind = k8s_obj.get('object').kind
namespace = k8s_obj.get('namespace')
if hasattr(k8s_obj.get('object').metadata, 'name'):
name = k8s_obj.get('object').metadata.name
else:
name = ''
api_version = k8s_obj.get('object').api_version
if k8s_obj.get('status') == 'Creating':
if kind in self.CHECK_DICT_KEY:
check_method = self.\
_select_check_status_by_kind(kind)
check_method(k8s_client_dict, k8s_obj,
namespace, name, api_version)
else:
k8s_obj['status'] = 'Create_complete'
keep_going = False
for k8s_obj in k8s_objs:
if k8s_obj.get('status') != 'Create_complete':
keep_going = True
else:
if k8s_obj.get('object', '').metadata:
LOG.debug(
'Resource namespace: {namespace},'
'name:{name},kind: {kind} '
'is create complete'.format(
namespace=k8s_obj.get('namespace'),
name=k8s_obj.get('object').metadata.name,
kind=k8s_obj.get('object').kind)
)
else:
LOG.debug(
'Resource namespace: {namespace},'
'name:{name},kind: {kind} '
'is create complete'.format(
namespace=k8s_obj.get('namespace'),
name='',
kind=k8s_obj.get('object').kind)
)
if keep_going:
time.sleep(self.STACK_RETRY_WAIT)
stack_retries -= 1
if stack_retries == 0 and keep_going:
LOG.error('Timed out waiting for resource creation '
'while instantiating CNF.')
for k8s_obj in k8s_objs:
if k8s_obj.get('status') == 'Creating':
k8s_obj['status'] = 'Wait_failed'
err_reason = _("Resource creation timed out. "
"namespace: {namespace}, name: {name}, "
"kind: {kind}. Reason: {message}").\
format(namespace=k8s_obj.get('namespace'),
name=k8s_obj.get('object').metadata.name,
kind=k8s_obj.get('object').kind,
message=k8s_obj['message'])
LOG.error(err_reason)
error_reason = _(
"Resource creation is not completed within"
" {wait} seconds as creation of stack {stack}"
" is not completed").format(
wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT),
stack=vnf_instance.id
)
raise vnfm.CNFCreateWaitFailed(reason=error_reason)
return k8s_objs
except Exception as e:
LOG.error('Creating wait VNF got an error due to %s', e)
raise
finally:
self.clean_authenticate_vim(auth_cred, file_descriptor)
LOG.error('Creating wait CNF got an error due to %s', e)
raise e
def _select_check_status_by_kind(self, kind):
check_dict = {
"Pod": self._check_status_pod,
"Service": self._check_status_service,
"PersistentVolumeClaim":
self._check_status_persistent_volume_claim,
"Namespace": self._check_status_namespace,
"Node": self._check_status_node,
"PersistentVolume": self._check_status_persistent_volume,
"APIService": self._check_status_api_service,
"DaemonSet": self._check_status_daemon_set,
"Deployment": self._check_status_deployment,
"ReplicaSet": self._check_status_replica_set,
"StatefulSet": self._check_status_stateful_set,
"Job": self._check_status_job,
"VolumeAttachment": self._check_status_volume_attachment
}
return check_dict[kind]
def _check_is_ip(self, ip_str):
if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip_str):
num_list = [int(x) for x in ip_str.split('.')]
for i in num_list:
if i > 255 or i < 0:
return False
return True
else:
return False
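For reference, the standard library's ipaddress module performs the same dotted-quad validation without a hand-rolled regex; a minimal alternative sketch:

import ipaddress

def is_ipv4(ip_str):
    try:
        ipaddress.IPv4Address(ip_str)
        return True
    except ValueError:
        return False

assert is_ipv4('10.0.0.1')
assert not is_ipv4('256.1.1.1')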
def _check_status_stateful_set(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
stateful_set = k8s_client_dict[api_version]. \
read_namespaced_stateful_set(namespace=namespace, name=name)
if stateful_set.status.replicas != \
stateful_set.status.ready_replicas:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Pod in StatefulSet is still creating. " \
"The pod is ready {value1}/{value2}".format(
value1=stateful_set.status.ready_replicas,
value2=stateful_set.status.replicas
)
else:
for i in range(0, stateful_set.spec.replicas):
volume_claim_templates = stateful_set.spec.\
volume_claim_templates
for volume_claim_template in volume_claim_templates:
pvc_name = "-".join(
[volume_claim_template.metadata.name, name, str(i)])
persistent_volume_claim = k8s_client_dict['v1']. \
read_namespaced_persistent_volume_claim(
namespace=namespace, name=pvc_name)
if persistent_volume_claim.status.phase != 'Bound':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "PersistentVolumeClaim in " \
"StatefulSet is still " \
"creating." \
"The status is " \
"{status}".format(
status=persistent_volume_claim.status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'StatefulSet is created'
def _check_status_pod(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
pod = k8s_client_dict[api_version].read_namespaced_pod(
namespace=namespace, name=name)
if pod.status.phase != 'Running':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Pod is still creating. The status is " \
"{status}".format(status=pod.
status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "Pod is created"
def _check_status_service(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
service = k8s_client_dict[api_version].read_namespaced_service(
namespace=namespace, name=name)
status_flag = False
if service.spec.cluster_ip in ['', None] or \
self._check_is_ip(service.spec.cluster_ip):
try:
endpoint = k8s_client_dict['v1'].\
read_namespaced_endpoints(namespace=namespace, name=name)
if endpoint:
status_flag = True
except Exception as e:
msg = _('read endpoints failed. kind: {kind}, reason: {e}'.format(
kind=service.kind, e=e))
LOG.error(msg)
raise exceptions.ReadEndpoindsFalse(error=msg)
if status_flag:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "Service is created"
else:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Service is still creating." \
"The status is False"
def _check_status_persistent_volume_claim(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
claim = k8s_client_dict[api_version].\
read_namespaced_persistent_volume_claim(
namespace=namespace, name=name)
if claim.status.phase != 'Bound':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "PersistentVolumeClaim is still creating."\
"The status is {status}".\
format(status=claim.status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "PersistentVolumeClaim is created"
def _check_status_namespace(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
name_space = k8s_client_dict[api_version].read_namespace(name=name)
if name_space.status.phase != 'Active':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Namespace is still creating." \
"The status is {status}". \
format(status=name_space.status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "Namespace is created"
def _check_status_node(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
node = k8s_client_dict[api_version].read_node(name=name)
status_flag = False
for condition in node.status.conditions:
if condition.type == 'Ready':
if condition.status == 'True':
status_flag = True
break
else:
continue
if status_flag:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "Node is created"
else:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Node is still creating." \
"The status is False"
def _check_status_persistent_volume(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
volume = k8s_client_dict[api_version].\
read_persistent_volume(name=name)
if volume.status.phase != 'Available' and \
volume.status.phase != 'Bound':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "PersistentVolume is still creating." \
"The status is {status}". \
format(status=volume.status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "PersistentVolume is created"
def _check_status_api_service(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
api_service = k8s_client_dict[api_version].read_api_service(name=name)
status_flag = False
for condition in api_service.status.conditions:
if condition.type == 'Available':
if condition.status == 'True':
status_flag = True
break
else:
continue
if status_flag:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "APIService is created"
else:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "APIService is still creating." \
"The status is False"
def _check_status_daemon_set(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
daemon_set = k8s_client_dict[api_version].\
read_namespaced_daemon_set(namespace=namespace, name=name)
if daemon_set.status.desired_number_scheduled != \
daemon_set.status.number_ready:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "DaemonSet is still creating. " \
"The DaemonSet is ready {value1}/{value2}".\
format(value1=daemon_set.status.number_ready,
value2=daemon_set.status.desired_number_scheduled)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'DaemonSet is created'
def _check_status_deployment(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
deployment = k8s_client_dict[api_version].\
read_namespaced_deployment(namespace=namespace, name=name)
if deployment.status.replicas != deployment.status.ready_replicas:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Deployment is still creating. " \
"The Deployment is ready {value1}/{value2}".\
format(value1=deployment.status.ready_replicas,
value2=deployment.status.replicas
)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'Deployment is created'
def _check_status_replica_set(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
replica_set = k8s_client_dict[api_version].\
read_namespaced_replica_set(namespace=namespace, name=name)
if replica_set.status.replicas != replica_set.status.ready_replicas:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "ReplicaSet is still creating. " \
"The ReplicaSet is ready {value1}/{value2}".\
format(value1=replica_set.status.ready_replicas,
value2=replica_set.status.replicas
)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'ReplicaSet is created'
def _check_status_job(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
job = k8s_client_dict[api_version].\
read_namespaced_job(namespace=namespace, name=name)
if job.spec.completions != job.status.succeeded:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Job is still creating." \
"The status is {status}". \
format(status=job.spec.completions)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'Job is created'
def _check_status_volume_attachment(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
volume = k8s_client_dict[api_version].\
read_volume_attachment(name=name)
if not volume.status.attached:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "VolumeAttachment is still creating." \
"The status is {status}". \
format(status=volume.status.attached)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'VolumeAttachment is created'
def _get_pods_information(self, core_v1_api_client, deployment_info):
"""Get pod information"""
@ -265,10 +625,9 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
# TODO(phuoc): do nothing, will update it if we need actions
pass
def delete(self, plugin, context, vnf_id, auth_attr, region_name=None):
def _delete_legacy(self, vnf_id, auth_cred):
"""Delete function"""
# initialize Kubernetes APIs
auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
try:
core_v1_api_client = self.kubernetes.get_core_v1_api_client(
auth=auth_cred)
@ -330,23 +689,195 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
except Exception as e:
LOG.debug(e)
pass
except Exception:
raise
def _select_delete_api(self, k8s_client_dict, namespace, name,
kind, api_version, body):
"""select kubernetes delete api and call"""
def convert(name):
name_with_underscores = re.sub(
'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
name_with_underscores).lower()
snake_case_kind = convert(kind)
kubernetes = translate_outputs.Transformer(
None, None, None, None)
try:
if 'namespaced' in kubernetes.method_value.get(kind):
delete_api = eval('k8s_client_dict[api_version].'
'delete_namespaced_%s' % snake_case_kind)
response = delete_api(name=name, namespace=namespace,
body=body)
else:
delete_api = eval('k8s_client_dict[api_version].'
'delete_%s' % snake_case_kind)
response = delete_api(name=name, body=body)
except Exception:
raise
return response
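The convert() helper turns a kind into the suffix of the generated delete method; getattr would serve in place of eval here and avoids evaluating a constructed string. Illustrative conversions:

import re

def convert(name):
    name_with_underscores = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
                  name_with_underscores).lower()

assert convert('StatefulSet') == 'stateful_set'
assert convert('APIService') == 'api_service'
# e.g. getattr(k8s_client_dict['apps/v1'],
#              'delete_namespaced_' + convert('StatefulSet'))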
def _get_pvc_list_for_delete(self, k8s_client_dict, sfs_name, namespace):
pvc_list_for_delete = list()
try:
resp_read_sfs = k8s_client_dict['apps/v1'].\
read_namespaced_stateful_set(sfs_name, namespace)
sfs_spec = resp_read_sfs.spec
volume_claim_templates = list()
volume_claim_templates = sfs_spec.volume_claim_templates
try:
resp_list_pvc = k8s_client_dict['v1'].\
list_namespaced_persistent_volume_claim(namespace)
pvc_list = resp_list_pvc.items
for volume_claim_template in volume_claim_templates:
pvc_template_metadata = volume_claim_template.metadata
match_pattern = '-'.join(
[pvc_template_metadata.name, sfs_name, ""])
for pvc in pvc_list:
pvc_metadata = pvc.metadata
pvc_name = pvc_metadata.name
match_result = re.match(
match_pattern + '[0-9]+$', pvc_name)
if match_result is not None:
pvc_list_for_delete.append(pvc_name)
except Exception as e:
LOG.debug(e)
pass
except Exception as e:
LOG.debug(e)
pass
return pvc_list_for_delete
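The match pattern encodes the Kubernetes PVC naming convention for StatefulSets, <template-name>-<statefulset-name>-<ordinal>; a quick sketch with hypothetical names:

import re

sfs_name = 'curry-sfs'     # hypothetical StatefulSet name
template = 'www'           # hypothetical volumeClaimTemplate name
match_pattern = '-'.join([template, sfs_name, ''])

assert re.match(match_pattern + '[0-9]+$', 'www-curry-sfs-0')
assert re.match(match_pattern + '[0-9]+$', 'www-other-sfs-0') is None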
@log.log
def _delete_k8s_obj(self, kind, k8s_client_dict, vnf_resource, body):
namespace = vnf_resource.resource_name.\
split(COMMA_CHARACTER)[0]
name = vnf_resource.resource_name.\
split(COMMA_CHARACTER)[1]
api_version = vnf_resource.resource_type.\
split(COMMA_CHARACTER)[0]
pvc_list_for_delete = list()
# if kind is StatefulSet, create name list for deleting
# PersistentVolumeClaim created when StatefulSet is generated.
if kind == 'StatefulSet':
pvc_list_for_delete = \
self._get_pvc_list_for_delete(
k8s_client_dict=k8s_client_dict,
sfs_name=name,
namespace=namespace)
# delete target k8s obj
try:
self._select_delete_api(
k8s_client_dict=k8s_client_dict,
namespace=namespace,
name=name,
kind=kind,
api_version=api_version,
body=body)
LOG.debug('Successfully deleted resource: '
'kind=%(kind)s, name=%(name)s',
{"kind": kind, "name": name})
except Exception as e:
LOG.debug(e)
pass
if (kind == 'StatefulSet' and
len(pvc_list_for_delete) > 0):
for delete_pvc_name in pvc_list_for_delete:
try:
k8s_client_dict['v1'].\
delete_namespaced_persistent_volume_claim(
name=delete_pvc_name,
namespace=namespace,
body=body)
except Exception as e:
LOG.debug(e)
pass
@log.log
def delete(self, plugin, context, vnf_id, auth_attr, region_name=None,
vnf_instance=None, terminate_vnf_req=None):
"""Delete function"""
auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
try:
if not vnf_instance:
# execute legacy delete method
self._delete_legacy(vnf_id, auth_cred)
else:
# initialize Kubernetes APIs
k8s_client_dict = self.kubernetes.\
get_k8s_client_dict(auth=auth_cred)
# get V1DeleteOptions for deleting an API object
body = {}
vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
context, vnf_instance.id)
if terminate_vnf_req:
if terminate_vnf_req.termination_type == 'GRACEFUL':
grace_period_seconds = terminate_vnf_req.\
graceful_termination_timeout
elif terminate_vnf_req.termination_type == 'FORCEFUL':
grace_period_seconds = 0
body = client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=grace_period_seconds)
else:
body = client.V1DeleteOptions(
propagation_policy='Foreground')
# follow the order below to resolve dependency when deleting
ordered_kind = [
# 1.
'Deployment', 'Job', 'DaemonSet', 'StatefulSet',
# 2.
'Pod',
# 3.
'PersistentVolumeClaim', 'ConfigMap', 'Secret',
'PriorityClass',
# 4.
'PersistentVolume',
# 5.
'StorageClass',
# 6. Except for 1 to 5 above, delete before `Namespace`
'Service', 'LimitRange', 'PodTemplate', 'Node',
'ResourceQuota', 'ServiceAccount', 'APIService',
'ReplicaSet', 'ControllerRevision',
'HorizontalPodAutoscaler', 'Lease', 'NetworkPolicy',
'ClusterRole', 'ClusterRoleBinding', 'Role', 'RoleBinding',
'VolumeAttachment',
# 7. Delete `Namespace` finally
'Namespace'
]
for kind in ordered_kind:
for vnf_resource in vnf_resources:
obj_kind = vnf_resource.resource_type.\
split(COMMA_CHARACTER)[1]
if obj_kind == kind:
self._delete_k8s_obj(
kind=obj_kind,
k8s_client_dict=k8s_client_dict,
vnf_resource=vnf_resource,
body=body)
except Exception as e:
LOG.error('Deleting VNF got an error due to %s', e)
raise
finally:
self.clean_authenticate_vim(auth_cred, file_descriptor)
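The V1DeleteOptions built above maps the ETSI termination types onto Kubernetes deletion semantics; a minimal sketch of the two request bodies (the 120-second timeout is illustrative):

from kubernetes import client

# GRACEFUL: honor the requested graceful_termination_timeout
graceful = client.V1DeleteOptions(
    propagation_policy='Foreground', grace_period_seconds=120)

# FORCEFUL: delete immediately
forceful = client.V1DeleteOptions(
    propagation_policy='Foreground', grace_period_seconds=0)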
def delete_wait(self, plugin, context, vnf_id, auth_attr,
region_name=None):
"""Delete wait function
def _delete_wait_legacy(self, vnf_id, auth_cred):
"""Delete wait function for legacy
This function is used to check whether a containerized VNF is
deleted completely or not. We do it by getting information about
the Kubernetes objects. When Tacker cannot get any information
about the service, the VNF will be marked as deleted.
"""
# initialize Kubernetes APIs
auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
try:
core_v1_api_client = self.kubernetes.get_core_v1_api_client(
auth=auth_cred)
@ -401,6 +932,91 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
except Exception as e:
LOG.error('Deleting wait VNF got an error due to %s', e)
raise
def _select_k8s_obj_read_api(self, k8s_client_dict, namespace, name,
kind, api_version):
"""select kubernetes read api and call"""
def convert(name):
name_with_underscores = re.sub(
'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
name_with_underscores).lower()
snake_case_kind = convert(kind)
try:
if namespace:
read_api = eval('k8s_client_dict[api_version].'
'read_namespaced_%s' % snake_case_kind)
response = read_api(name=name, namespace=namespace)
else:
read_api = eval('k8s_client_dict[api_version].'
'read_%s' % snake_case_kind)
response = read_api(name=name)
except Exception:
raise
return response
@log.log
def delete_wait(self, plugin, context, vnf_id, auth_attr,
region_name=None, vnf_instance=None):
"""Delete wait function
This function is used to check whether a containerized VNF is
deleted completely or not. We do it by getting information about
the Kubernetes objects. When Tacker cannot get any information
about the service, the VNF will be marked as deleted.
"""
# initialize Kubernetes APIs
auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
try:
if not vnf_instance:
# execute legacy delete_wait method
self._delete_wait_legacy(vnf_id, auth_cred)
else:
vnf_resources = objects.VnfResourceList.\
get_by_vnf_instance_id(context, vnf_instance.id)
k8s_client_dict = self.kubernetes.\
get_k8s_client_dict(auth=auth_cred)
keep_going = True
stack_retries = self.STACK_RETRIES
while keep_going and stack_retries > 0:
count = 0
for vnf_resource in vnf_resources:
namespace = vnf_resource.resource_name.\
split(COMMA_CHARACTER)[0]
name = vnf_resource.resource_name.\
split(COMMA_CHARACTER)[1]
api_version = vnf_resource.resource_type.\
split(COMMA_CHARACTER)[0]
kind = vnf_resource.resource_type.\
split(COMMA_CHARACTER)[1]
try:
self._select_k8s_obj_read_api(
k8s_client_dict=k8s_client_dict,
namespace=namespace,
name=name,
kind=kind,
api_version=api_version)
count = count + 1
except Exception:
pass
stack_retries = stack_retries - 1
# If one of objects is still alive, keeps on waiting
if count > 0:
keep_going = True
time.sleep(self.STACK_RETRY_WAIT)
else:
keep_going = False
except Exception as e:
LOG.error('Deleting wait VNF got an error due to %s', e)
raise
finally:
self.clean_authenticate_vim(auth_cred, file_descriptor)
@ -551,22 +1167,143 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
pass
def _get_target_k8s_files(self, instantiate_vnf_req):
if instantiate_vnf_req.additional_params and\
CNF_TARGET_FILES_KEY in\
instantiate_vnf_req.additional_params.keys():
target_k8s_files = instantiate_vnf_req.\
additional_params[CNF_TARGET_FILES_KEY]
else:
target_k8s_files = list()
return target_k8s_files
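For reference, a request that takes this path lists the definition files under additionalParams; a hypothetical fragment (the file paths are illustrative):

additional_params = {
    'lcm-kubernetes-def-files': [
        'Files/kubernetes/deployment.yaml',
        'Files/kubernetes/service.yaml'
    ]
}
# _get_target_k8s_files() returns that list; an empty list sends the
# request down the TOSCA-based code path instead.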
def pre_instantiation_vnf(self, context, vnf_instance,
vim_connection_info, image_data):
raise NotImplementedError()
vim_connection_info, vnf_software_images,
instantiate_vnf_req, vnf_package_path):
vnf_resources = dict()
target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
if not target_k8s_files:
# if artifact_files is not provided in the request, we assume
# the k8s info is provided by a TOSCA-based VNFD
# and push the request on to the existing code path
return vnf_resources
else:
vnfd = vnfd_obj.VnfPackageVnfd.get_by_id(
context, vnf_instance.vnfd_id)
package_uuid = vnfd.package_uuid
vnf_package = vnf_package_obj.VnfPackage.get_by_id(
context, package_uuid, expected_attrs=['vnf_artifacts'])
if vnf_package.vnf_artifacts:
vnf_artifacts = vnf_package.vnf_artifacts
length = len(vnf_artifacts)
for target_k8s_file in target_k8s_files:
for index, vnf_artifact in enumerate(vnf_artifacts):
if vnf_artifact.artifact_path == target_k8s_file:
break
if length > 1 and index < length - 1:
continue
LOG.debug('CNF Artifact {path} is not found.'.format(
path=target_k8s_file))
setattr(vnf_instance, 'vim_connection_info', [])
setattr(vnf_instance, 'task_state', None)
vnf_instance.save()
raise vnfm.CnfDefinitionNotFound(
path=target_k8s_file)
else:
LOG.debug('VNF Artifact {path} is not found.'.format(
path=vnf_package.vnf_artifacts))
setattr(vnf_instance, 'vim_connection_info', [])
setattr(vnf_instance, 'task_state', None)
vnf_instance.save()
raise exceptions.VnfArtifactNotFound(id=vnf_package.id)
for target_k8s_index, target_k8s_file \
in enumerate(target_k8s_files):
if ((urlparse(target_k8s_file).scheme == 'file') or
(bool(urlparse(target_k8s_file).scheme) and
bool(urlparse(target_k8s_file).netloc))):
file_content = urllib2.urlopen(target_k8s_file).read()
else:
if vnf_package_path is None:
vnf_package_path = \
vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
target_k8s_file_path = os.path.join(
vnf_package_path, target_k8s_file)
with open(target_k8s_file_path, 'r') as f:
file_content = f.read()
file_content_dict_list = yaml.safe_load_all(file_content)
vnf_resources_temp = []
for file_content_dict in file_content_dict_list:
vnf_resource = vnf_resource_obj.VnfResource(
context=context)
vnf_resource.vnf_instance_id = vnf_instance.id
vnf_resource.resource_name = ','.join([
file_content_dict.get('metadata', {}).get(
'namespace', ''),
file_content_dict.get('metadata', {}).get(
'name', '')])
vnf_resource.resource_type = ','.join([
file_content_dict.get('apiVersion', ''),
file_content_dict.get('kind', '')])
vnf_resource.resource_identifier = ''
vnf_resource.resource_status = ''
vnf_resources_temp.append(vnf_resource)
vnf_resources[target_k8s_index] = vnf_resources_temp
return vnf_resources
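Each VnfResource row packs two values into one comma-separated column, which the delete path later splits on COMMA_CHARACTER; for a Deployment in the default namespace (names illustrative):

# resource_name: '<namespace>,<name>'
# resource_type: '<apiVersion>,<kind>'
namespace, name = 'default,curry-deployment'.split(',')
api_version, kind = 'apps/v1,Deployment'.split(',')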
def delete_vnf_instance_resource(self, context, vnf_instance,
vim_connection_info, vnf_resource):
raise NotImplementedError()
pass
def instantiate_vnf(self, context, vnf_instance, vnfd_dict,
vim_connection_info, instantiate_vnf_req,
grant_response):
raise NotImplementedError()
grant_response, vnf_package_path, base_hot_dict):
target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
auth_attr = vim_connection_info.access_info
if not target_k8s_files:
# This case is the TOSCA-based CNF operation path,
# which is out of the scope of this patch.
instance_id = self.create(
None, context, vnf_instance, auth_attr)
return instance_id
else:
auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
k8s_client_dict = self.kubernetes.get_k8s_client_dict(auth_cred)
if vnf_package_path is None:
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
transformer = translate_outputs.Transformer(
None, None, None, k8s_client_dict)
deployment_dict_list = list()
k8s_objs = transformer.\
get_k8s_objs_from_yaml(target_k8s_files, vnf_package_path)
k8s_objs = transformer.deploy_k8s(k8s_objs)
k8s_objs = self.create_wait_k8s(
k8s_objs, k8s_client_dict, vnf_instance)
for k8s_obj in k8s_objs:
deployment_dict = dict()
deployment_dict['namespace'] = k8s_obj.get('namespace')
if k8s_obj.get('object').metadata:
deployment_dict['name'] = k8s_obj.get('object').\
metadata.name
else:
deployment_dict['name'] = ''
deployment_dict['apiVersion'] = k8s_obj.get(
'object').api_version
deployment_dict['kind'] = k8s_obj.get('object').kind
deployment_dict['status'] = k8s_obj.get('status')
deployment_dict_list.append(deployment_dict)
deployment_str_list = [str(x) for x in deployment_dict_list]
# all the deployment objects are stored in resource_info_str,
# and the instance_id is created from all the deployment dicts.
resource_info_str = ';'.join(deployment_str_list)
self.clean_authenticate_vim(auth_cred, file_descriptor)
vnfd_dict['instance_id'] = resource_info_str
return resource_info_str
def post_vnf_instantiation(self, context, vnf_instance,
vim_connection_info):
raise NotImplementedError()
pass
def heal_vnf(self, context, vnf_instance, vim_connection_info,
heal_vnf_request):

View File

@ -58,7 +58,9 @@ class TOSCAToKubernetes(object):
transformer = translate_outputs.Transformer(
core_v1_api_client=self.core_v1_api_client,
app_v1_api_client=self.app_v1_api_client,
scaling_api_client=self.scaling_api_client)
scaling_api_client=self.scaling_api_client,
k8s_client_dict=None
)
kubernetes_objects = transformer.transform(tosca_kube_objects)
deployment_names = transformer.deploy(
kubernetes_objects=kubernetes_objects)

View File

@ -450,13 +450,19 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes(mgmt_ips)
@log.log
def delete(self, plugin, context, vnf_id, auth_attr, region_name=None):
def delete(self, plugin, context, vnf_id, auth_attr, region_name=None,
vnf_instance=None, terminate_vnf_req=None):
if terminate_vnf_req:
if (terminate_vnf_req.termination_type == 'GRACEFUL' and
terminate_vnf_req.graceful_termination_timeout > 0):
time.sleep(terminate_vnf_req.graceful_termination_timeout)
heatclient = hc.HeatClient(auth_attr, region_name)
heatclient.delete(vnf_id)
@log.log
def delete_wait(self, plugin, context, vnf_id, auth_attr,
region_name=None):
region_name=None, vnf_instance=None):
self._wait_until_stack_ready(vnf_id, auth_attr,
infra_cnst.STACK_DELETE_IN_PROGRESS,
infra_cnst.STACK_DELETE_COMPLETE, vnfm.VNFDeleteWaitFailed,
@ -605,8 +611,10 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
raise vnfm.VNFHealFailed(vnf_id=vnf_dict['id'])
@log.log
def pre_instantiation_vnf(self, context, vnf_instance,
vim_connection_info, vnf_software_images):
def pre_instantiation_vnf(
self, context, vnf_instance, vim_connection_info,
vnf_software_images, instantiate_vnf_req=None,
vnf_package_path=None):
glance_client = gc.GlanceClient(vim_connection_info)
vnf_resources = {}