Update PROXY, TARGET_NODE parameters

This PS adds a PROXY parameter to the system executable script;
make uses the PROXY parameter to pass the proxy while building images.

This change also adds TARGET_NODE and CLUSTER_NAMESPACE parameters to pass
node and cluster namespace details for further operations.

Change-Id: I9ff8e12ff679526b728c55ffd23c3ed513db4589
Author: Battina, Sai (sb464f)
Date: 2020-12-22 12:42:24 -06:00
Committed by: sai battina
Commit: 0f0e01ea0e
Parent: 41d353444f
7 changed files with 24 additions and 6 deletions
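
For context, a minimal sketch of how the PROXY value typically reaches an image build; the Makefile target itself is not part of this diff, so the forwarding below is an assumption, not this repository's implementation:

# Hypothetical forwarding of PROXY into a docker build; the build args follow
# common Docker proxy conventions rather than this repo's Makefile.
export PROXY=${PROXY:-${http_proxy}}
docker build \
    --build-arg http_proxy="$PROXY" \
    --build-arg https_proxy="$PROXY" \
    --build-arg no_proxy="$NO_PROXY" \
    -t airshipctl:local .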

@@ -81,6 +81,7 @@ images:
baremetal_operator:
ironic: # ironic Deployment
init_bootstrap: centos
init_images: quay.io/airshipit/ipa:latest
dnsmasq: quay.io/metal3-io/ironic:capm3-v0.4.0
httpd: quay.io/metal3-io/ironic:capm3-v0.4.0
ironic: quay.io/metal3-io/ironic:capm3-v0.4.0
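
Once the ironic Deployment is applied, the new catalogue entry should surface as the image of the init-images init container. A hedged spot check (the namespace value is an assumption, not part of this change):

# Print the image used by the init container named "init-images";
# the namespace below is an example only.
kubectl -n metal3 get deployment ironic \
    -o jsonpath='{.spec.template.spec.initContainers[?(@.name=="init-images")].image}'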

@@ -2,5 +2,6 @@
set -xe
#Copy files to shared mount
mkdir -p /shared/html/images/
cp -f /ipa-ubuntu-master* /shared/html/images/
chmod 777 -R /shared/html/images/
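
This init script publishes the IPA artifacts into the shared volume that httpd serves to booting nodes. A hedged way to verify the files landed, with namespace and container names as placeholders:

# List the published images from inside the ironic pod's httpd container;
# namespace and container name are examples, not taken from this commit.
kubectl -n metal3 exec deploy/ironic -c httpd -- ls -l /shared/html/images/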

@@ -19,6 +19,15 @@ replacements:
kind: Deployment
name: ironic
fieldrefs: ["{.spec.template.spec.initContainers[?(.name == 'init-bootstrap')].image}"]
- source:
objref:
name: versions-airshipctl
fieldref: "{.images.baremetal_operator.ironic.init_images}"
target:
objref:
kind: Deployment
name: ironic
fieldrefs: ["{.spec.template.spec.initContainers[?(.name == 'init-images')].image}"]
- source:
objref:
name: versions-airshipctl
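
The new replacement entry copies the catalogue's init_images value onto the init-images container at render time. A hedged way to eyeball the substitution; the phase name below is a placeholder, not taken from this commit:

# Render a phase that includes the ironic Deployment and check the substituted
# image; "initinfra-ephemeral" is only an example phase name.
airshipctl phase render initinfra-ephemeral | grep -B1 -A1 'init-images'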

@@ -53,6 +53,7 @@ template: |
bmc:
address: {{ $host.bmcAddress }}
credentialsName: {{ $hostName }}-bmc-secret
disableCertificateVerification: {{ $host.disableCertificateVerification }}
firmware:
{{ toYaml $hardwareProfile.firmware | indent 4 }}
{{- /* If no raid is defined for a host, simply skip. There is no default setting for raid */ -}}
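
The template now propagates each host's disableCertificateVerification setting into the generated BareMetalHost under spec.bmc (the host entries in the following hunks supply the values). A hedged post-deployment check, reusing names already defaulted elsewhere in this change:

# Confirm the flag reached the generated BareMetalHost; "default" and "node01"
# mirror the defaults used later in this change and may differ per site.
kubectl -n default get bmh node01 \
    -o jsonpath='{.spec.bmc.disableCertificateVerification}'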

@@ -15,6 +15,7 @@ hosts:
bmcAddress: redfish+http://10.23.25.1:8000/redfish/v1/Systems/air-target-1
bmcUsername: root
bmcPassword: r00tme
disableCertificateVerification: false
ipAddresses:
oam-ipv4: 10.23.25.102
pxe-ipv4: 10.23.24.102
@@ -28,6 +29,7 @@ hosts:
bmcAddress: redfish+http://10.23.25.2:8000/redfish/v1/Systems/air-target-2
bmcUsername: username
bmcPassword: password
disableCertificateVerification: false
ipAddresses:
oam-ipv4: 10.23.25.101
pxe-ipv4: 10.23.24.101
@@ -41,6 +43,7 @@ hosts:
bmcAddress: redfish+http://10.23.25.1:8000/redfish/v1/Systems/air-worker-1
bmcUsername: username
bmcPassword: password
disableCertificateVerification: false
ipAddresses:
oam-ipv4: 10.23.25.103
pxe-ipv4: 10.23.24.103

@@ -18,6 +18,7 @@ export USE_PROXY=${USE_PROXY:-"false"}
export HTTPS_PROXY=${HTTPS_PROXY:-${https_proxy}}
export HTTP_PROXY=${HTTP_PROXY:-${http_proxy}}
export NO_PROXY=${NO_PROXY:-${no_proxy}}
export PROXY=${PROXY:-${http_proxy}}
echo "Build airshipctl docker images"
make images
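
With PROXY exported here, a caller building behind a proxy can steer the value that make images consumes. A hedged example invocation; the proxy URL and script path are placeholders:

# Example build behind a corporate proxy; the URL is illustrative only and the
# script path is an assumption about where this snippet lives.
export USE_PROXY=true
export HTTP_PROXY=http://proxy.example.com:8080
export HTTPS_PROXY=$HTTP_PROXY
export PROXY=$HTTP_PROXY
./tools/deployment/21_systemwide_executable.sh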

@@ -19,12 +19,14 @@ export TIMEOUT=${TIMEOUT:-3600}
export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}
export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
export TARGET_NODE=${TARGET_NODE:-"node01"}
export CLUSTER_NAMESPACE=${CLUSTER_NAMESPACE:-"default"}
echo "Check Cluster Status"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get cluster target-cluster -o json | jq '.status.controlPlaneReady'
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT -n $CLUSTER_NAMESPACE get cluster target-cluster -o json | jq '.status.controlPlaneReady'
echo "Annotate BMH for target node"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT annotate bmh node01 baremetalhost.metal3.io/paused=true
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT -n $CLUSTER_NAMESPACE annotate bmh $TARGET_NODE baremetalhost.metal3.io/paused=true
echo "Move Cluster Object to Target Cluster"
airshipctl phase run clusterctl-move
@@ -37,11 +39,11 @@ kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get pods -
end=$(($(date +%s) + $TIMEOUT))
echo "Waiting $TIMEOUT seconds for crds to be created."
while true; do
if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get cluster target-cluster -o json | jq '.status.controlPlaneReady' | grep -q true) ; then
if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT -n $CLUSTER_NAMESPACE get cluster target-cluster -o json | jq '.status.controlPlaneReady' | grep -q true) ; then
echo -e "\nGet CRD status"
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get bmh
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get machines
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get clusters
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT -n $CLUSTER_NAMESPACE get bmh
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT -n $CLUSTER_NAMESPACE get machines
kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT -n $CLUSTER_NAMESPACE get clusters
break
else
now=$(date +%s)
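
With TARGET_NODE and CLUSTER_NAMESPACE parameterized, the same script can target a differently named control-plane host or a non-default namespace. A hedged example; both values below are placeholders:

# Override the defaults before running the script; names are examples only.
export TARGET_NODE=cp-node-1
export CLUSTER_NAMESPACE=target-infra
# Optional sanity check that the BMH exists where the script expects it:
kubectl --kubeconfig "$KUBECONFIG" --context "$KUBECONFIG_EPHEMERAL_CONTEXT" \
    -n "$CLUSTER_NAMESPACE" get bmh "$TARGET_NODE"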