Merge "(move) Move the dev pipeline / standard-container"

Zuul 2021-02-26 17:52:53 +00:00 committed by Gerrit Code Review
commit 5d3d0df24b
55 changed files with 1697 additions and 96 deletions


@ -14,23 +14,22 @@ rules:
resources: ["namespaces"] resources: ["namespaces"]
verbs: ["list", "get", "create"] verbs: ["list", "get", "create"]
- apiGroups: ["rbac.authorization.k8s.io"] - apiGroups: ["rbac.authorization.k8s.io"]
# allow namespaces to be retrieved to validate we haven't already created it already # allow roles to be retrieved to validate we haven't already created it already
resources: ["roles"] resources: ["roles"]
verbs: ["list", "get", "create"] verbs: ["list", "get", "create"]
- apiGroups: [""] - apiGroups: [""]
# secrets are only needed for GitHub/GitLab interceptors
resources: ["configmaps"] resources: ["configmaps"]
verbs: ["get", "list", "watch"] verbs: ["get", "list", "watch", "create"]
# Permissions to create resources in associated TriggerTemplates # Permissions to create resources in associated TriggerTemplates
- apiGroups: ["tekton.dev"] - apiGroups: ["tekton.dev"]
resources: ["pipelineruns", "pipelineresources", "taskruns"] resources: ["pipelineruns", "pipelineresources", "taskruns", "pipelines","tasks"]
verbs: ["create"] verbs: ["create", "get", "list"]
- apiGroups: [""] - apiGroups: [""]
resources: ["serviceaccounts"] resources: ["serviceaccounts"]
verbs: ["impersonate", "get", "create"] verbs: ["impersonate", "get", "create"]
- apiGroups: [""] - apiGroups: [""]
resources: ["secrets"] resources: ["secrets"]
verbs: ["get"] verbs: ["get", "list", "create"]
- apiGroups: [""] - apiGroups: [""]
resources: ["services"] resources: ["services"]
verbs: ["get"] verbs: ["get"]
@ -42,4 +41,4 @@ rules:
verbs: ["get", "create"] verbs: ["get", "create"]
... ...
{{- end -}} {{- end -}}
{{- include "helpers.template.overlay" ( dict "Global" $ "template_definition" "ClusterRole-el" ) }} {{- include "helpers.template.overlay" ( dict "Global" $ "template_definition" "ClusterRole-el" ) }}
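A quick way to sanity-check the widened EventListener permissions once the chart is deployed is kubectl auth can-i; the namespace and service-account names below are placeholders for whatever this ClusterRole is actually bound to:

    # Hypothetical names; substitute the EventListener's real namespace and service account.
    kubectl auth can-i create pipelines.tekton.dev --as=system:serviceaccount:jarvis-system:jarvis-el-sa
    kubectl auth can-i create secrets --as=system:serviceaccount:jarvis-system:jarvis-el-sa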


@ -21,29 +21,38 @@ spec:
image: {{ include "helpers.pod.container.image" ( dict "Global" $ "Application" "task_create_namespace" ) }}
script: |
#!/bin/bash
set -ex
create_namespace() {
if ! [[ $(kubectl get ns jarvis-$(params.changeNumber)-$(params.patchSetNumber)) ]] ; then
kubectl create ns jarvis-$(params.changeNumber)-$(params.patchSetNumber)
echo "Created namespace jarvis-$(params.changeNumber)-$(params.patchSetNumber)"
else
-echo "Namespace already exists"
-exit
echo "Namespace already exists, delete all resources for re-run."
kubectl delete all --all -n jarvis-$(params.changeNumber)-$(params.patchSetNumber)
fi
}
create_namespace
- name: create-k8s-objects
image: {{ include "helpers.pod.container.image" ( dict "Global" $ "Application" "task_create_namespace" ) }}
script: |
-#Service account creation
-cat > $(workspaces.output.path)/service-account.yaml <<EOF
#!/bin/bash
create_sa() {
if ! [[ $(kubectl get serviceaccount -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) sa-development-pipeline) ]] ; then
tee $(workspaces.output.path)/service-account.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
-name: jarvis-$(params.changeNumber)-$(params.patchSetNumber)
name: sa-development-pipeline
namespace: jarvis-$(params.changeNumber)-$(params.patchSetNumber)
EOF
-#Role creation
-cat > $(workspaces.output.path)/role.yaml <<EOF
else
echo "Service Account already exists"
exit
fi
}
create_role() {
if ! [[ $(kubectl get role -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) jarvis-$(params.changeNumber)-$(params.patchSetNumber)) ]] ; then
tee $(workspaces.output.path)/role.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
@ -68,8 +77,14 @@ spec:
resources: ["roles", "rolebindings"]
verbs: ["get"]
EOF
-#Rolebinding creation
-cat > $(workspaces.output.path)/rolebinding.yaml <<EOF
else
echo "Role already exists"
exit
fi
}
create_rolebinding() {
if ! [[ $(kubectl get rolebinding -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) jarvis-$(params.changeNumber)-$(params.patchSetNumber)) ]] ; then
tee $(workspaces.output.path)/rolebinding.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
@ -77,13 +92,22 @@ spec:
namespace: jarvis-$(params.changeNumber)-$(params.patchSetNumber)
subjects:
- kind: ServiceAccount
-name: jarvis-$(params.changeNumber)-$(params.patchSetNumber)
name: sa-development-pipeline
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: jarvis-$(params.changeNumber)-$(params.patchSetNumber)
EOF
-#!/bin/bash
else
echo "rolebinding already exists"
exit
fi
}
#Service account creation
create_sa
create_role
create_rolebinding
cat $(workspaces.output.path)/service-account.yaml | kubectl apply -f -
cat $(workspaces.output.path)/role.yaml | kubectl apply -f -
cat $(workspaces.output.path)/rolebinding.yaml | kubectl apply -f -
@ -138,13 +162,82 @@ spec:
- name: create-secrets
image: {{ include "helpers.pod.container.image" ( dict "Global" $ "Application" "task_secrets" ) }}
script: |
-#!/bin/sh
-kubectl create secret generic harbor-ca --from-file=harbor-ca=/etc/jarvis/certs/ca/ca.pem -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) || true
-kubectl create secret generic kubeconfig-secret --from-file=kubeconfig=$HOME/.kube/config -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) || true
-#Required to know what authentication to use when pushing to Harbor, should have a different ID then admin in future.
-kubectl create secret generic harbor-basic-auth --from-literal=username='admin' --from-literal=password='Harbor12345' -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) || true
-kubectl create secret docker-registry harbor-docker-auth --docker-username=admin --docker-password=Harbor12345 --docker-email=example@gmail.com --docker-server=harbor-core.jarvis.local -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) || true
#!/bin/bash
## Creating kubeconfig secret in correct namespace
SECRET_JSON_ORIGINAL=$(mktemp --suffix=".json")
kubectl get secret -n development-pipeline kubeconfig-secret -o=json > "$SECRET_JSON_ORIGINAL"
SECRET_JSON=$(mktemp --suffix=".json")
jq 'del(.metadata.namespace) | del(.metadata.creationTimestamp) | del(.metadata.labels."controller-uid") | del(.metadata.resourceVersion) | del(.metadata.selfLink) | del(.metadata.uid) | del(.spec.selector) | del(.spec.template.metadata.creationTimestamp) | del(.spec.template.metadata.labels."controller-uid" )' "$SECRET_JSON_ORIGINAL" > "$SECRET_JSON"
cat "$SECRET_JSON" | kubectl create -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) -f -
## Creating Harbor certificate in correct namespace
SECRET_JSON_ORIGINAL=$(mktemp --suffix=".json")
kubectl get secret -n development-pipeline harbor-ca -o=json > "$SECRET_JSON_ORIGINAL"
SECRET_JSON=$(mktemp --suffix=".json")
jq 'del(.metadata.namespace) | del(.metadata.creationTimestamp) | del(.metadata.labels."controller-uid") | del(.metadata.resourceVersion) | del(.metadata.selfLink) | del(.metadata.uid) | del(.spec.selector) | del(.spec.template.metadata.creationTimestamp) | del(.spec.template.metadata.labels."controller-uid" )' "$SECRET_JSON_ORIGINAL" > "$SECRET_JSON"
cat "$SECRET_JSON" | kubectl create -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) -f -
rm "$SECRET_JSON_ORIGINAL"
rm "$SECRET_JSON"
## Harbor basic auth and docker auth
kubectl create secret generic harbor-basic-auth \
--from-literal=username='admin' \
--from-literal=password='Harbor12345' \
-n jarvis-$(params.changeNumber)-$(params.patchSetNumber)
kubectl create secret docker-registry harbor-docker-auth \
--docker-username=admin \
--docker-password=Harbor12345 \
--docker-email=example@gmail.com \
--docker-server=harbor-core.jarvis.local \
-n jarvis-$(params.changeNumber)-$(params.patchSetNumber)
- name: install-development-pipeline
image: {{ include "helpers.pod.container.image" ( dict "Global" $ "Application" "task_secrets" ) }}
script: |
#!/bin/bash
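# Gerrit stores each patchset at refs/changes/<last two digits of change number>/<change number>/<patchset>;
# the awk below zero-pads the change number and keeps its last two digits to build that ref.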
change_ref="refs/changes/$(echo "0$(params.changeNumber)" | awk '{ print substr( $0, length($0) - 1, length($0) ) }')/$(params.changeNumber)/$(params.patchSetNumber)"
echo $change_ref
cd "$(workspaces.output.path)"/jarvis
helm upgrade --install development-pipeline \
-n jarvis-$(params.changeNumber)-$(params.patchSetNumber) \
./development-pipeline \
--set "git_repo=$(params.repoRoot)/$(params.project)" \
--set "refspec=$change_ref"
- name: create-pipelinerun
image: {{ include "helpers.pod.container.image" ( dict "Global" $ "Application" "task_secrets" ) }}
script: |
#!/bin/bash
kubectl create \
-n jarvis-$(params.changeNumber)-$(params.patchSetNumber) \
-f "$(workspaces.output.path)"/jarvis/development-pipeline/pipelinerun-validation.yaml
# Default wait timeout is 3000 seconds
end=$(date +%s)
timeout=${3:-3000}
end=$((end + timeout))
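# Poll the SUCCEEDED column of the development-pipeline PipelineRun until it turns True (success)
# or False (failure), or until the timeout above expires.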
while true; do
pipelinerunstatus="$(kubectl get pipelinerun -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) $(kubectl get pipelinerun -n jarvis-$(params.changeNumber)-$(params.patchSetNumber) -o name | awk -F '/' "/development-pipeline/ { print \$NF; exit }") | tail -1 | awk '{ print $2 }')"
[ "${pipelinerunstatus}" == "True" ] && break
[ "${pipelinerunstatus}" == "False" ] && exit 1
sleep 5
now=$(date +%s)
if [ $now -gt $end ] ; then
echo "Pipelinerun failed to complete after $timeout seconds"
echo
kubectl get pipelinerun --namespace jarvis-$(params.changeNumber)-$(params.patchSetNumber) -o wide
echo "Some pipelineruns are not complete"
exit 1
fi
echo "kubectl get pipelinerun -n jarvis-$(params.changeNumber)-$(params.patchSetNumber)" | grep development-pipeline
done
...
{{- end -}}
{{- include "helpers.template.overlay" ( dict "Global" $ "template_definition" "Task-createProjectAccess" ) }}


@ -17,6 +17,7 @@
LOGDIR: "{{ logs_dir }}"
args:
chdir: "{{ zuul.project.src_dir }}/{{ gate_scripts_relative_path | default('') }}"
ignore_errors: True
- name: Upload the logs
synchronize:


@ -44,6 +44,5 @@ Vagrant.configure("2") do |config|
./tools/gate/jarvis/650-temporary-setup.sh
./tools/gate/jarvis/700-deploy-jarvis-system.sh
./tools/gate/jarvis/800-deploy-jarvis-projects.sh
-./tools/gate/jarvis/900-development-pipeline.sh
SHELL
end


@ -1,5 +1,5 @@
#!/bin/bash
-set -ux
set -x
export PARALLELISM_FACTOR=2
export OBJECT_TYPE=node,clusterrole,clusterrolebinding,storageclass,namespace,crd


@ -1,5 +1,5 @@
#!/bin/bash
-set -ux
set -x
export PARALLELISM_FACTOR=2


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"image_name": "amf",
"project": "amf",
"repo": "amf",
"tag": "1.built",
"image_fullname": "cnf/amf:1.0",
"path": "images/amf",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/amf",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "amf",
"project": "amf",
"repo": "amf",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "amf",
"images": {
"applications": {
"amf": {
"tag": "1.0",
"name": "amf",
"repo": "amf"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}
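Each of these JSON documents is mounted into the pipeline through the k8s_cluster_data workspace (see the PipelineRun and setup tasks further down) and handed to the Ansible roles as an extra-vars file. A minimal sketch of that pattern, using the Tekton workspace variables the tasks rely on:

    # Copy the rendered JSON out of the ConfigMap-backed workspace, then feed it to a playbook stage.
    cp "$(workspaces.k8s_cluster_data.path)/image.json" "$(workspaces.development_pipeline_data.path)/image.json"
    ansible-playbook -i hosts -e '{"stage":"build"}' \
      -e @"$(workspaces.development_pipeline_data.path)/default.json" \
      -e @"$(workspaces.development_pipeline_data.path)/image.json"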


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"image_name": "ausf",
"project": "ausf",
"repo": "ausf",
"tag": "1.built",
"image_fullname": "cnf/ausf:1.0",
"path": "images/ausf",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/ausf",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "ausf",
"project": "ausf",
"repo": "ausf",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "ausf",
"images": {
"applications": {
"ausf": {
"tag": "1.0",
"name": "ausf",
"repo": "ausf"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_name": "mongodb",
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"project": "mongodb",
"repo": "mongodb",
"tag": "1.built",
"image_fullname": "cnf/mongodb:1.0",
"path": "images/mongodb",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/mongodb",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "mongodb",
"project": "mongodb",
"repo": "mongodb",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "mongodb",
"images": {
"applications": {
"mongodb": {
"tag": "1.0",
"name": "mongodb",
"repo": "mongodb"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"image_name": "nrf",
"project": "nrf",
"repo": "nrf",
"tag": "1.built",
"image_fullname": "cnf/nrf:1.0",
"path": "images/nrf",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/nrf",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "nrf",
"project": "nrf",
"repo": "nrf",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "nrf",
"images": {
"applications": {
"nrf": {
"tag": "1.0",
"name": "nrf",
"repo": "nrf"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"image_name": "pcf",
"project": "pcf",
"repo": "pcf",
"tag": "1.built",
"image_fullname": "cnf/pcf:1.0",
"path": "images/pcf",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/pcf",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "pcf",
"project": "pcf",
"repo": "pcf",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "pcf",
"images": {
"applications": {
"pcf": {
"tag": "1.0",
"name": "pcf",
"repo": "pcf"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"image_name": "smf",
"project": "smf",
"repo": "smf",
"tag": "1.built",
"image_fullname": "cnf/smf:1.0",
"path": "images/smf",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/smf",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "smf",
"project": "smf",
"repo": "smf",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "smf",
"images": {
"applications": {
"smf": {
"tag": "1.0",
"name": "smf",
"repo": "smf"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"image_name": "udm",
"project": "udm",
"repo": "udm",
"tag": "1.built",
"image_fullname": "cnf/udm:1.0",
"path": "images/udm",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/udm",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "udm",
"project": "udm",
"repo": "udm",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "udm",
"images": {
"applications": {
"udm": {
"tag": "1.0",
"name": "udm",
"repo": "udm"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}


@ -0,0 +1,69 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
data:
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_from": "harbor-core.jarvis.local/library/ubuntu:focal",
"image_name": "udr",
"project": "udr",
"repo": "udr",
"tag": "1.built",
"image_fullname": "cnf/udr:1.0",
"path": "images/udr",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/udr",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "udr",
"project": "udr",
"repo": "udr",
"version": "0.1.0",
"build_from_source": true,
"path": "charts",
"build": {
"git_repo": "{{ $.Values.git_repo }}",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "{{ $.Values.refspec }}",
"version": "refs/changes/*:refs/changes/*"
},
"namespace": "development-pipeline",
"release_name": "udr",
"images": {
"applications": {
"udr": {
"tag": "1.0",
"name": "udr",
"repo": "udr"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}


@ -1,13 +1,17 @@
#!/bin/bash
set -ex
ldap_username="jarvis"
ldap_password="password"
ldap_email="jarvis@cluster.local"
harbor_core="harbor-core.jarvis.local" #Defined in harbor overrides, TODO, extract from there
#TODO(staceyF) Put this into appropriate jarvis-system tasks
kubectl create ns development-pipeline || true
kubectl create secret generic harbor-ca --from-file=harbor-ca=/etc/jarvis/certs/ca/ca.pem -n development-pipeline || true
kubectl create secret generic kubeconfig-secret --from-file=kubeconfig=$HOME/.kube/config -n development-pipeline || true
#NOTE Will not be required once Harbor is backed by LDAP
-kubectl create secret generic harbor-basic-auth --from-literal=username='admin' --from-literal=password='Harbor12345' -n development-pipeline || true
kubectl create secret generic harbor-basic-auth --from-literal=username=$ldap_username --from-literal=password=$ldap_password -n development-pipeline || true
-kubectl create secret docker-registry harbor-docker-auth --docker-username=admin --docker-password=Harbor12345 --docker-email=example@gmail.com --docker-server=harbor-core.jarvis.local -n development-pipeline || true
kubectl create secret docker-registry harbor-docker-auth --docker-username=$ldap_username --docker-password=$ldap_password --docker-email=$ldap_email --docker-server=$harbor_core -n development-pipeline || true
-cd ./tools/images/standard-container
cd ./tools/gate/jarvis/standard-container
-sudo docker build --build-arg BASE_IMAGE=ubuntu:focal -t standard-container:1.0 .
sudo docker build -t standard-container:1.0 .


@ -28,6 +28,7 @@ for jarvis_project in `find ./tools/gate/jarvis/5G-SA-core -maxdepth 1 -mindepth
fi
# shellcheck disable=SC2046
# Copy development-pipeline to be
helm upgrade \
--create-namespace \
--install \
@ -57,6 +58,7 @@ for jarvis_project in `find ./tools/gate/jarvis/5G-SA-core -maxdepth 1 -mindepth
EOF
popd
cp -a tools/gate/jarvis/5G-SA-core/${jarvis_project}/. "${jarvis_sanity_repo}"
cp -a tools/gate/jarvis/development-pipeline/* "${jarvis_sanity_repo}/jarvis/development-pipeline"
pushd "${jarvis_sanity_repo}"
git review -s
git add -A
@ -65,56 +67,41 @@ EOF
change_id=`git log -1 | grep Change-Id: | awk '{print $2}'`
popd
-# Check jarvis pipeline run
-end=$(date +%s)
-timeout="1800"
-end=$((end + timeout))
-while true; do
-result="$(curl -L https://gerrit.jarvis.local/changes/${change_id}/revisions/1/checks | tail -1 | jq -r .[].state)"
-[ $result == "SUCCESSFUL" ] && break || [ $result == "FAILED" ] && break || true
-sleep 5
-now=$(date +%s)
-if [ $now -gt $end ] ; then
-echo "Pipeline failed to complete $timeout seconds"
-exit 0
-fi
-done
-### Ensure the repository is configured correctly ###
-end=$(date +%s)
-timeout="30"
-end=$((end + timeout))
-while true; do
-if [ "$voting_ci" = "true" ];
-then
-# Check that Jarvis-System has reported the success of the pipeline run to Gerrit, by checking the value of the Verified label
-VERIFIED="$(curl -L https://gerrit.jarvis.local/changes/${change_id}/revisions/1/review/ | tail -1 | jq -r .labels.Verified.all[0].value)"
-[ "$VERIFIED" == 1 ] && break || true
-sleep 5
-now=$(date +%s)
-if [ "$now" -gt "$end" ] ; then
-echo "Jarvis-System has not verified the change"
-exit 1
-fi
-else
-# Ensure that the patchset doesn't have the Verified label available to it.
-LABELS=$(curl -L https://gerrit.jarvis.local/changes/${change_id}/revisions/1/review/ | tail -1 | jq -r .labels)
-if [ -z "$LABELS" ]; then
-# The curl request didn't give us the labels available to this revision, try again when Gerrit is ready
-sleep 5
-continue
-fi
-VERIFIED_NULL="$( jq -r .Verified <<< "$LABELS" )"
-if [ -z "$VERIFIED_NULL" ]; then
-echo "Verified label found"
-# Verified label should not be found, exit.
-exit 1
-else
-# Labels curl returned all the labels successfully, and Verified was not in the list. This is desired.
-break
-fi
-fi
-done
COUNTER=$((COUNTER+1))
done
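# The replacement gate check below first waits for the jarvis-projects and jarvis-8-1 namespaces to settle,
# then polls the Gerrit checks API until the Jarvis pipeline reports SUCCESSFUL or FAILED, and finally
# confirms that Jarvis-System applied Verified=+1 to the change.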
./tools/deployment/common/wait-for-pods.sh jarvis-projects
./tools/deployment/common/wait-for-pods.sh jarvis-8-1
# Check jarvis pipeline run
end=$(date +%s)
timeout="3400"
end=$((end + timeout))
change_id=8
while true; do
result="$(curl -L https://gerrit.jarvis.local/changes/${change_id}/revisions/1/checks | tail -1 | jq -r .[].state)"
[ $result == "SUCCESSFUL" ] && break || [ $result == "FAILED" ] && break || true
sleep 5
now=$(date +%s)
if [ $now -gt $end ] ; then
echo "Pipeline failed to complete $timeout seconds"
exit 1
fi
done
# Check that Jarvis-System has reported the success of the pipeline run to Gerrit
end=$(date +%s)
timeout="120"
end=$((end + timeout))
change_id=8
while true; do
VERIFIED="$(curl -L https://gerrit.jarvis.local/changes/${change_id}/revisions/1/review/ | tail -1 | jq -r .labels.Verified.all[0].value)"
[ "$VERIFIED" == 1 ] && break || true
sleep 5
now=$(date +%s)
if [ "$now" -gt "$end" ] ; then
echo "Jarvis-System has not verified the change"
exit 1
fi
done


@ -1,11 +0,0 @@
#!/bin/bash
set -ex
cd ./tools/images
sudo make build IMAGE_FULLNAME=standard-container:1.0
cd ../../charts
helm upgrade --install development-pipeline -n development-pipeline ./development-pipeline
kubectl apply -n development-pipeline -f ./development-pipeline/config_map.yaml.example
kubectl create -n development-pipeline -f ./development-pipeline/pipelinerun-validation.yaml
../tools/deployment/common/wait-for-pipelinerun.sh development-pipeline development-pipeline


@ -0,0 +1,6 @@
apiVersion: v2
name: development-pipeline
description: A Helm chart for Tekton Development pipeline
type: application
version: 0.1.0
appVersion: 1.16.0


@ -0,0 +1,21 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
generateName: development-pipeline-run
spec:
pipelineRef:
name: development-pipeline
serviceAccountName: sa-development-pipeline
workspaces:
- name: k8s_cluster_data
configMap:
name: deployment-flow
- name: development_pipeline_data
volumeClaimTemplate:
spec:
storageClassName: standard
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
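Because this manifest uses generateName, it has to be submitted with kubectl create rather than kubectl apply (apply needs a fixed name). A minimal sketch of submitting and watching it, using the development-pipeline namespace the gate scripts target:

    kubectl create -n development-pipeline -f pipelinerun-validation.yaml
    kubectl get pipelinerun -n development-pipeline -w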


@ -0,0 +1,119 @@
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: development-pipeline
namespace: {{ $.Release.Namespace }}
spec:
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
tasks:
- name: microflow-setup-cluster-config
taskRef:
name: setup-cluster-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-setup-image-config
taskRef:
name: setup-image-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-setup-chart-config
taskRef:
name: setup-chart-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-setup-cleanup-config
taskRef:
name: setup-cleanup-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-k8s
runAfter:
- microflow-setup-cluster-config
taskRef:
name: k8s-cluster
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-images
runAfter:
- microflow-setup-image-config
- microflow-setup-cluster-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
taskRef:
name: build-images
- name: microflow-charts
runAfter:
- microflow-setup-chart-config
- microflow-setup-cluster-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
taskRef:
name: build-charts
- name: microflow-deployment-manifests
runAfter:
- microflow-setup-image-config
- microflow-setup-chart-config
- microflow-setup-cluster-config
- microflow-setup-cleanup-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
taskRef:
name: deployment-manifests
- name: microflow-functional
runAfter:
- microflow-deployment-manifests
- microflow-k8s
- microflow-images
- microflow-charts
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
taskRef:
name: functional
- name: microflow-promote-artifacts
runAfter:
- microflow-functional
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
taskRef:
name: promote
finally:
- name: microflow-cleanup
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
taskRef:
name: cleanup


@ -0,0 +1,29 @@
{{- if $.Values.role.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $.Values.role.name }}
namespace: {{ $.Release.Namespace }}
rules:
# EventListeners need to be able to fetch all namespaced resources
- apiGroups: ["triggers.tekton.dev"]
resources: ["eventlisteners", "triggerbindings", "triggertemplates", "triggers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
# Permissions to create resources in associated TriggerTemplates
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["impersonate", "get"]
# Permissions to execute helm dry-run
- apiGroups: [""]
resources: ["secrets", "services"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings"]
verbs: ["get"]
{{- end }}


@ -0,0 +1,14 @@
{{- if and ($.Values.serviceAccount.create) ($.Values.role.bind) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $.Values.role.name }}
namespace: {{ $.Release.Namespace }}
subjects:
- kind: ServiceAccount
name: {{ $.Values.serviceAccount.name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ $.Values.role.name }}
{{- end }}


@ -0,0 +1,7 @@
{{- if $.Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ $.Values.serviceAccount.name }}
namespace: {{ $.Release.Namespace }}
{{- end }}


@ -0,0 +1,71 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-charts
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task builds charts if source is provided
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: clone
volumeMounts:
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
update-ca-certificates
ansible-playbook -vvv {{ $.Values.tasks.git.gitPlaybook }} -i hosts -e '{"stage":"clone"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
- name: lint-chart
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.chart.chartPlaybook }} -i hosts -e '{"stage":"lint"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output-after-lint-dryrun
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
- name: package-chart
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.chart.chartPlaybook }} -i hosts -e '{"stage":"package"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output-after-packaging
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
- name: publish-chart
volumeMounts:
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
- mountPath: /workspace/helm-creds
name: helm-publish-creds
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
update-ca-certificates
ansible-playbook -vvv {{ $.Values.tasks.chart.chartPlaybook }} -i hosts -e '{"stage":"publish"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output-after-publish
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
volumes:
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth
- name: harbor-ca
secret:
secretName: harbor-ca


@ -0,0 +1,28 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: cleanup
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task gathers logs and cleans up the environment
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: cleanup
image: {{ $.Values.tasks.cleanup.cleanupImage }}
volumeMounts:
- mountPath: $(workspaces.development_pipeline_data.path)/config
name: kubeconfig
script: |
#!/bin/sh
ansible-playbook -vvv "{{ $.Values.tasks.cleanup.cleanupPlaybook }}" -i hosts \
-e @"$(workspaces.development_pipeline_data.path)/default.json" \
-e @"$(workspaces.development_pipeline_data.path)/chart.json" \
-e @"$(workspaces.development_pipeline_data.path)/image.json" \
-e @"$(workspaces.development_pipeline_data.path)/cluster.json"
volumes:
- name: kubeconfig
secret:
secretName: kubeconfig-secret


@ -0,0 +1,16 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: deployment-manifests
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task will do any validation of manifests required to deploy the CNF
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: example-validation-step
image: {{ $.Values.tasks.deploymentManifests.deploymentManifestsImage }}
script: |
ansible-playbook -vvv "{{ $.Values.tasks.deploymentManifests.deploymentManifestsPlaybook }}" -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json"


@ -0,0 +1,41 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: functional
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task will deploy the CNF and run any tests specified
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: deploy-helm-charts
image: {{ $.Values.tasks.functional.functionalDeployImage }}
volumeMounts:
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
- mountPath: /workspace/helm-creds
name: helm-publish-creds
- mountPath: $(workspaces.development_pipeline_data.path)/config
name: kubeconfig
script: |
#!/bin/sh
update-ca-certificates
ansible-playbook -vvv {{ $.Values.tasks.functional.functionalPlaybook }} -i hosts -e '{"stage":"deploy"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/cluster.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: run-helm-tests
image: {{ $.Values.tasks.functional.functionalTestImage }}
script: |
#!/bin/sh
ansible-playbook -vvv {{ $.Values.tasks.functional.functionalPlaybook }} -i hosts -e '{"stage":"test"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/cluster.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
volumes:
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth
- name: harbor-ca
secret:
secretName: harbor-ca
- name: kubeconfig
secret:
secretName: kubeconfig-secret


@ -0,0 +1,130 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task builds images if source is provided
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: clone
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
name: dind-certs
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
script: |
#!/usr/bin/env sh
update-ca-certificates
ansible-playbook -vvv {{ $.Values.tasks.git.gitPlaybook }} -i hosts -e '{"stage":"clone"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
- name: set-image-output
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
- name: docker-build
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
name: dind-certs
env:
# Connect to the sidecar over TCP, with TLS.
- name: DOCKER_HOST
value: tcp://localhost:2376
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: '1'
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.image.imagePlaybook }} -i hosts -e '{"stage":"build"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
- name: set-image-build-output-after-build
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
- name: publish-and-scan-image
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
name: dind-certs
- mountPath: /tekton/home/.docker/config.json
name: image-push-creds
subPath: .dockerconfigjson
env:
# Connect to the sidecar over TCP, with TLS.
- name: DOCKER_HOST
value: tcp://localhost:2376
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: '1'
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.image.imagePlaybook }} -i hosts -e '{"stage":"push"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
- name: set-image-output-after-publish-scan
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
- name: get-scan-results
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
name: dind-certs
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.image.imagePlaybook }} -i hosts -e '{"stage":"scan_results"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
- name: set-image-output-set-scan-results
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
sidecars:
- image: {{ $.Values.tasks.image.sidecarServer }}
name: server
args:
- --storage-driver=overlay2
- --userland-proxy=false
- --debug
- --insecure-registry={{ $.Values.tasks.image.insecureRegistry }}
securityContext:
privileged: true
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
volumeMounts:
- mountPath: /certs/client
name: dind-certs
- mountPath: /var/lib/docker
name: var-lib-docker
# Wait for the dind daemon to generate the certs it will share with the
# client.
readinessProbe:
periodSeconds: 1
exec:
command: ['ls', '/certs/client/ca.pem']
volumes:
- name: dind-certs
emptyDir: {}
- name: var-lib-docker
emptyDir: {}
- name: image-push-creds
secret:
secretName: harbor-docker-auth
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth
- name: harbor-ca
secret:
secretName: harbor-ca


@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: k8s-cluster
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task will create a k8s cluster if needed or verify that an existing cluster is reachable
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: generate-kubeconfig
image: {{ $.Values.tasks.kubernetes.createClusterImage }}
script: |
ansible-playbook -vvv "{{ $.Values.tasks.kubernetes.getKubeconfigPlaybook }}" -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/cluster.json"
volumeMounts:
- mountPath: $(workspaces.development_pipeline_data.path)/config
name: kubeconfig
volumes:
- name: kubeconfig
secret:
secretName: kubeconfig-secret


@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: promote
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task will promote images and chart into a non-test repository
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: promote-artifacts
image: {{ $.Values.tasks.promote.promoteImage }}
env:
# Connect to the sidecar over TCP, with TLS.
- name: DOCKER_HOST
value: tcp://localhost:2376
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: '1'
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
volumeMounts:
- mountPath: /tekton/home/.docker/config.json
name: image-push-creds
subPath: .dockerconfigjson
- mountPath: /certs/client
name: dind-certs
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
- mountPath: /workspace/helm-creds
name: helm-publish-creds
script: |
#!/usr/bin/env sh
set -ex
update-ca-certificates
ansible-playbook -vvv {{ $.Values.tasks.promote.promotePlaybook }} -i hosts -e '{"stage":"promote_image"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
ansible-playbook -vvv {{ $.Values.tasks.promote.promotePlaybook }} -i hosts -e '{"stage":"promote_chart"}' -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
sidecars:
- image: {{ $.Values.tasks.image.sidecarServer }}
name: server
args:
- --storage-driver=vfs
- --userland-proxy=false
- --debug
- --insecure-registry={{ $.Values.tasks.image.insecureRegistry }}
securityContext:
privileged: true
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the
# client.
readinessProbe:
periodSeconds: 1
exec:
command: ['ls', '/certs/client/ca.pem']
volumes:
- name: dind-certs
emptyDir: {}
- name: image-push-creds
secret:
secretName: harbor-docker-auth
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth
- name: harbor-ca
secret:
secretName: harbor-ca


@ -0,0 +1,84 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-cluster-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-cluster-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/default.json" "$(workspaces.development_pipeline_data.path)/default.json"
cp "$(workspaces.k8s_cluster_data.path)/cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"
jq '.cluster_kubeconfig_path="$(workspaces.development_pipeline_data.path)/config"' "$(workspaces.development_pipeline_data.path)/cluster.json" > "$(workspaces.development_pipeline_data.path)/temp_cluster.json" && mv "$(workspaces.development_pipeline_data.path)/temp_cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-image-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-image-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/image.json" "$(workspaces.development_pipeline_data.path)/image.json"
echo "Set temporary image name to test/scan-image:$(context.taskRun.uid)"
jq '.image_fullname="test/scan-image:$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/image.json" > "$(workspaces.development_pipeline_data.path)/temp_image.json" && mv "$(workspaces.development_pipeline_data.path)/temp_image.json" "$(workspaces.development_pipeline_data.path)/image.json"
echo "Set tag to context $(context.taskRun.uid)"
jq '.tag="$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/image.json" > "$(workspaces.development_pipeline_data.path)/temp_image.json" && mv "$(workspaces.development_pipeline_data.path)/temp_image.json" "$(workspaces.development_pipeline_data.path)/image.json"
echo "Set checkout location for git repository to $(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"
jq '.build.checkout_loc="$(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/image.json" > "$(workspaces.development_pipeline_data.path)/temp_image.json" && mv "$(workspaces.development_pipeline_data.path)/temp_image.json" "$(workspaces.development_pipeline_data.path)/image.json"
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-chart-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-chart-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/default.json" "$(workspaces.development_pipeline_data.path)/default.json"
cp "$(workspaces.k8s_cluster_data.path)/chart.json" "$(workspaces.development_pipeline_data.path)/chart.json"
echo "Set tag to context $(context.taskRun.uid)"
jq '.tag="$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/chart.json" > "$(workspaces.development_pipeline_data.path)/temp_chart.json" && mv "$(workspaces.development_pipeline_data.path)/temp_chart.json" "$(workspaces.development_pipeline_data.path)/chart.json"
echo "Set checkout location for git repository to $(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"
jq '.build.checkout_loc="$(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/chart.json" > "$(workspaces.development_pipeline_data.path)/temp_chart.json" && mv "$(workspaces.development_pipeline_data.path)/temp_chart.json" "$(workspaces.development_pipeline_data.path)/chart.json"
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-cleanup-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-cleanup-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/cleanup.json" "$(workspaces.development_pipeline_data.path)/cleanup.json"


@ -0,0 +1,45 @@
serviceAccount:
create: false
name: sa-development-pipeline
role:
bind: true
name: development-pipeline
create: true
pvc:
storageClass: standard
size: 1Gi
tasks:
kubernetes:
createClusterImage: &base_image standard-container:1.0
createClusterPlaybook: /playbooks/create-cluster.yaml
getKubeconfigPlaybook: /playbooks/get-kubeconfig.yaml
validateClusterPlaybook: /playbooks/validate-cluster.yaml
setup:
setupConfigImage: *base_image
git:
gitPlaybook: /playbooks/git-microflow.yaml
image:
buildImage: *base_image
sidecarServer: docker:19-dind
insecureRegistry: harbor-core.jarvis.local
imagePlaybook: /playbooks/images-microflow.yaml
name: standard-container.yaml
chart:
buildChartImage: *base_image
chartPlaybook: /playbooks/charts-microflow.yaml
deploymentManifests:
deploymentManifestsImage: *base_image
deploymentManifestsPlaybook: /playbooks/deployment-manifests.yaml
promote:
promoteImage: *base_image
promotePlaybook: /playbooks/promote-microflow.yaml
functional:
functionalDeployImage: *base_image
functionalTestImage: *base_image
functionalPlaybook: /playbooks/functional-microflow.yaml
cleanup:
cleanupImage: *base_image
cleanupPlaybook: /playbooks/cleanup.yaml


@ -0,0 +1,53 @@
ARG BASE_IMAGE=ubuntu:20.04
FROM ${BASE_IMAGE}
SHELL ["bash", "-exc"]
ENV DEBIAN_FRONTEND noninteractive
# Update distro and install ansible
RUN apt-get update ;\
apt-get dist-upgrade -y ;\
apt-get install -y \
python3-minimal \
python3-pip \
python3-setuptools \
make \
sudo \
git \
jq \
curl \
git-review \
apt-transport-https \
ca-certificates \
gnupg-agent \
software-properties-common \
gettext-base ;\
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - ;\
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" ;\
apt-get install -y --no-install-recommends \
docker-ce-cli ;\
pip3 install --upgrade wheel ;\
pip3 install ansible ;\
ansible-galaxy collection install community.kubernetes ;\
pip3 install docker ;\
# Install kubectl
apt-get install -y --no-install-recommends \
apt-transport-https \
gnupg2 ;\
curl -o /usr/bin/kubectl -L "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" ;\
chmod +x /usr/bin/kubectl ;\
# Install Helm
curl -fsSL -o helm-install.tar.gz https://get.helm.sh/helm-v3.4.2-linux-amd64.tar.gz \
&& tar -xvf helm-install.tar.gz \
&& rm helm-install.tar.gz \
&& mv linux-amd64/helm /usr/local/bin/helm \
&& chmod +x /usr/local/bin/helm \
&& helm --help ;\
helm plugin install https://github.com/chartmuseum/helm-push ;\
rm -rf /var/lib/apt/lists/*
COPY assets /opt/assets/
RUN cp -ravf /opt/assets/* / ;\
rm -rf /opt/assets
ENTRYPOINT /entrypoint.sh
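For local testing, the image can be built the same way the 650-temporary-setup.sh gate script does; the BASE_IMAGE build-arg is optional and defaults to ubuntu:20.04:

    sudo docker build --build-arg BASE_IMAGE=ubuntu:focal -t standard-container:1.0 .
    # Running the container with no arguments should just report the Ansible version via the entrypoint.
    sudo docker run --rm standard-container:1.0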


@ -0,0 +1,3 @@
#!/bin/bash
ansible --version


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- { role: charts, vars: { stage: "default" } }


@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Clean up release
include_tasks: ./roles/cleanup/tasks/cleanup.yaml


@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: validate-deployment-manifests-config
include_tasks: ./roles/deployment-manifests/tasks/validate-config.yaml


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- { role: functional, vars: { stage: "default" } }


@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Get kubeconfig for Kubernetes cluster to deploy CNF
include_tasks: ./roles/kubernetes/tasks/get-kubeconfig.yaml


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- { role: git, vars: { stage: "default" } }


@ -0,0 +1 @@
localhost ansible_connection=local


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- { role: images, vars: { stage: "default" } }


@ -0,0 +1,4 @@
---
- hosts: localhost
roles:
- { role: promote, vars: { stage: "default" } }


@ -0,0 +1,36 @@
- name: Helm Lint
block:
- name: Helm lint "{{ chart_name }}"
command: "helm lint {{ chart_name }}"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"
- name: Helm Dry-run "{{ chart_name }}"
command: "helm install --dry-run {{ chart_name }} {{ chart_name }}"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"
when: ( stage == "lint")
become: true
- name: Helm Package
block:
- name: Package Helm Chart
shell: helm package "{{ chart_name }}"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"
when: ( stage == "package")
- name: Helm Publish
block:
- name: Install Plugin
shell: helm plugin update push || helm plugin install https://github.com/chartmuseum/helm-push || true
- name: Get harbor username
shell: cat {{ harbor_secret_mounted_path }}/username
register: harbor_username
- name: Get harbor password
shell: cat {{ harbor_secret_mounted_path }}/password
register: harbor_password
- name: Add Harbor Helm repository and Test repository
shell: helm repo add "{{ chart_repository }}-staging" "https://{{ docker_registry }}/chartrepo/{{ chart_name }}-staging" --username={{ harbor_username.stdout }} --password={{ harbor_password.stdout }}
- name: Push chart "{{ chart_name }}" to Harbor staging registry
command: helm push "{{ chart_name }}-{{ version }}".tgz "{{ chart_repository }}-staging"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"
when: ( stage == "publish")

View File

@ -0,0 +1,3 @@
#Remove any resources deployed
- name: Remove test release
shell: helm delete --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" "{{ release_name }}" -n "{{ namespace }}"

View File

@ -0,0 +1,4 @@
- name: Validate deployment manifests (placeholder)
command: echo "Placeholder - add deployment manifest validation commands here"
args:
chdir: "/workspace"

View File

@ -0,0 +1,4 @@
#Test Deployed CNF
- name: Chart has no Helm Tests, echo for now
shell: echo "There are no helm tests yet"
# shell: echo 'helm test --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" "{{ name }}"'

View File

@ -0,0 +1,22 @@
- name: Functional Deploy
block:
#Deploy CNF
- name: Get harbor username
shell: cat {{ harbor_secret_mounted_path }}/username
register: harbor_username
- name: Get harbor password
shell: cat {{ harbor_secret_mounted_path }}/password
register: harbor_password
#TODO dex-aio doesn't install, look into another test chart
- name: Add Harbor Helm repository and Test repository
shell: helm repo add "{{ chart_repository }}-staging" "https://{{ docker_registry }}/chartrepo/{{ project }}-staging" --username={{ harbor_username.stdout }} --password={{ harbor_password.stdout }}
- name: Deploy chart "{{ chart_name }}"
shell: helm upgrade --install --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" "{{ release_name }}" "{{ chart_repository }}-staging/{{ chart_name }}" --version="{{ version }}" --namespace="{{ namespace }}" --username="{{ harbor_username.stdout }}" --password="{{ harbor_password.stdout }}" --create-namespace
when: ("{{ stage }}" == "deploy")
become: true
- name: Functional Test
block:
#Test Deployed CNF
- name: Chart currently has no Helm Tests, echo for now
shell: echo "There are no helm tests yet"
when: ( stage == "test")

View File

@ -0,0 +1,9 @@
#Clone the source repository and Gerrit change refs so later stages can build from it
- name: Clone repository
block:
- git:
repo: "{{ build.git_repo }}"
dest: "{{ build.checkout_loc }}"
version: "{{ build.refspec }}"
refspec: "refs/changes/*:refs/changes/*"
when: ( stage == "clone")

View File

@ -0,0 +1,45 @@
- name: Image Build
block:
#Build the Docker image from the checked-out repository
- name: Build Docker Image for "{{ image_name }}"
shell: docker build -t "{{ image_fullname }}" .
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"
when: ("{{ stage }}" == "build")
become: true
- name: Tag and Push Image
block:
- name: Tag image to Harbor url
shell: docker tag "{{ image_fullname }}" "{{ docker_registry }}/{{ project }}-staging/{{ repo }}:{{ tag }}"
- name: Push image to Harbor
shell: docker push "{{ docker_registry }}/{{ project }}-staging/{{ repo }}:{{ tag }}"
when: ( stage == "push")
- name: Get Scan Results
block:
#Scan results may take some time; retry with a delay until the scan finishes
- name: Output the scan results request URL
shell: echo "https://{{ docker_registry }}/api/v2.0/projects/{{ project }}-staging/repositories/{{ repo | replace('/','%2F') }}/artifacts/{{ tag }}?page=1&page_size=10&with_tag=true&with_label=false&with_scan_overview=true&with_signature=false&with_immutable_status=false"
- name: Get Scan Results
uri:
validate_certs: false
url: "https://{{ docker_registry }}/api/v2.0/projects/{{ project }}-staging/repositories/{{ repo | replace('/','%2F') }}/artifacts/{{ tag }}?page=1&page_size=10&with_tag=true&with_label=false&with_scan_overview=true&with_signature=false&with_immutable_status=false"
method: GET
body_format: "json"
headers:
accept: "application/json"
X-Request-Id: "12345"
#TODO: read this base64-encoded credential from a secret/configmap instead of hardcoding it
authorization: "Basic YWRtaW46SGFyYm9yMTIzNDU="
register: result
until: result.json.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"].scan_status == "Success"
retries: 5
delay: 30
- name: Check Scan Results Summary for High and Critical CVE
#shell: echo '{{ result.json.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"] }}'
set_fact:
image_status: "Vulnerable"
when: result.json.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"].severity in ("High","Critical")
when: ( stage == "scan_results")

View File

@ -0,0 +1,10 @@
#Validate Kubernetes cluster is accessible.
- set_fact:
existing_cluster: "{{ use_existing_cluster }}"
- name: Get kubeconfig
shell: echo "Retrieve kubeconfig from mounted secret"
when: existing_cluster
- name: Validate kubeconfig
shell: kubectl --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" get pods -n development-pipeline
- name: Find existing service accounts
shell: kubectl --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" get serviceaccounts -n development-pipeline

View File

@ -0,0 +1,29 @@
- name: Promote Chart
block:
- name: Install Plugin
shell: helm plugin update push || helm plugin install https://github.com/chartmuseum/helm-push || true
- name: Get harbor username
shell: cat {{ harbor_secret_mounted_path }}/username
register: harbor_username
- name: Get harbor password
shell: cat {{ harbor_secret_mounted_path }}/password
register: harbor_password
- name: Add Harbor Helm repository and Test repository
shell: helm repo add "{{ chart_repository }}-staging" "https://{{ chart_registry_url }}/{{ chart_name }}-staging" --username="{{ harbor_username.stdout }}" --password="{{ harbor_password.stdout }}" && helm repo add "{{ chart_repository }}" "https://{{ chart_registry_url }}/{{ chart_name }}" --username="{{ harbor_username.stdout }}" --password="{{ harbor_password.stdout }}"
- name: Pull down Helm Chart
shell: helm pull "{{ chart_repository }}-staging/{{ chart_name }}" --version="{{ version }}"
- name: Push chart "{{ chart_name }}" to Helm registry
command: helm push "{{ chart_name }}-{{ version }}".tgz "{{ chart_repository }}"
when: ( stage == "promote_chart")
become: true
- name: Promote Image
block:
- name: Pull image locally
shell: docker pull "{{ docker_registry }}/{{ project }}-staging/{{ repo }}:{{ tag }}"
- name: Tag image
shell: docker tag "{{ docker_registry }}/{{ project }}-staging/{{ repo }}:{{ tag }}" "{{ docker_registry }}/{{ project }}/{{ repo }}:{{ tag }}"
- name: Push to non-staging Harbor Project
shell: docker push "{{ docker_registry }}/{{ project }}/{{ repo }}:{{ tag }}"
when: ( stage == "promote_image")

View File

@ -0,0 +1,5 @@
#!/bin/bash
cp "$(workspaces.k8s_cluster_data.path)/default.json" "$(workspaces.development_pipeline_data.path)/default.json"
cp "$(workspaces.k8s_cluster_data.path)/cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"
jq '.cluster_kubeconfig_path="$(workspaces.development_pipeline_data.path)/config"' "$(workspaces.development_pipeline_data.path)/cluster.json" > "$(workspaces.development_pipeline_data.path)/temp_cluster.json" && mv "$(workspaces.development_pipeline_data.path)/temp_cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"
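
Illustratively (the extra key is assumed, not taken from the repository), the jq call only rewrites cluster_kubeconfig_path and leaves everything else in cluster.json untouched; note the $(workspaces...) tokens are substituted by Tekton before bash runs this script:

    # Same rewrite pattern on a toy document: only cluster_kubeconfig_path changes.
    echo '{"cluster_kubeconfig_path":"/old/path","use_existing_cluster":true}' \
      | jq -c '.cluster_kubeconfig_path="/workspace/development_pipeline_data/config"'
    # -> {"cluster_kubeconfig_path":"/workspace/development_pipeline_data/config","use_existing_cluster":true}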

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: harbor-helm-creds
namespace: development-pipeline
type: kubernetes.io/basic-auth
stringData:
username: admin
password: Harbor12345

View File

@ -42,7 +42,6 @@
     - ./tools/gate/jarvis/650-temporary-setup.sh
     - ./tools/gate/jarvis/700-deploy-jarvis-system.sh
     - ./tools/gate/jarvis/800-deploy-jarvis-projects.sh
-    - ./tools/gate/jarvis/900-development-pipeline.sh
 - job:
     name: airship-jarvis-sample-workload-validation