Ironic: move cleaning net management to job

This PS moves the cleaning network management to a job.

Additionally, the gate scripts are updated to enable single-node
operation for development use.

Change-Id: Icb7015bcceaf93bc65f60399caf785fbdcf72413
portdirect 2018-02-11 16:26:01 -05:00
parent 9c7f89db1d
commit 86d5b1ce05
12 changed files with 212 additions and 54 deletions
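
In outline, the pieces introduced below fit together as follows (a rough sketch using the paths and names that appear in the hunks; the authoritative wiring is in the templates themselves):

# 1. A Kubernetes job runs manage-cleaning-network.sh, which creates the Neutron
#    cleaning network and subnet only if they do not already exist; the api and
#    conductor pods list this job as a dependency, so their entrypoint init
#    containers wait for it to complete.
# 2. A second init container in the api and conductor pods runs
#    retreive-cleaning-network.sh, which looks up the network UUID and writes it
#    to an emptyDir shared with the service container:
#        /tmp/pod-shared/cleaning-network.conf
# 3. The service entrypoint then layers that snippet on top of the main config,
#    e.g. for the api:
#        exec ironic-api \
#          --config-file /etc/ironic/ironic.conf \
#          --config-file /tmp/pod-shared/cleaning-network.conf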

View File

@ -21,7 +21,8 @@ COMMAND="${@:-start}"
function start () {
exec ironic-api \
--config-file /etc/ironic/ironic.conf
--config-file /etc/ironic/ironic.conf \
--config-file /tmp/pod-shared/cleaning-network.conf
}
function stop () {

View File

@ -36,7 +36,7 @@ if [ "x" == "x${PXE_IP}" ]; then
exit 1
fi
cat <<EOF>/tmp/pod-shared/conductor-local-ip.conf
tee /tmp/pod-shared/conductor-local-ip.conf << EOF
[DEFAULT]
# IP address of this host. If unset, will determine the IP

View File

@ -23,4 +23,5 @@ mkdir -p /var/lib/openstack-helm/ironic/master_images
exec ironic-conductor \
--config-file /etc/ironic/ironic.conf \
--config-file /tmp/pod-shared/conductor-local-ip.conf
--config-file /tmp/pod-shared/conductor-local-ip.conf \
--config-file /tmp/pod-shared/cleaning-network.conf

View File

@ -0,0 +1,47 @@
#!/bin/bash
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
set -ex
if ! openstack network show ${neutron_network_name}; then
IRONIC_NEUTRON_CLEANING_NET_ID=$(openstack network create -f value -c id \
--share \
--provider-network-type flat \
--provider-physical-network ${neutron_provider_network} \
${neutron_network_name})
else
IRONIC_NEUTRON_CLEANING_NET_ID=$(openstack network show ${neutron_network_name} -f value -c id)
fi
for SUBNET in $(openstack network show $IRONIC_NEUTRON_CLEANING_NET_ID -f value -c subnets); do
CURRENT_SUBNET=$(openstack subnet show $SUBNET -f value -c name)
if [ "x${CURRENT_SUBNET}" == "x${neutron_subnet_name}" ]; then
openstack subnet show ${neutron_subnet_name}
SUBNET_EXISTS=true
fi
done
if [ "x${SUBNET_EXISTS}" != "xtrue" ]; then
openstack subnet create \
--gateway ${neutron_subnet_gateway%/*} \
--allocation-pool start=${neutron_subnet_alloc_start},end=${neutron_subnet_alloc_end} \
--dns-nameserver 10.96.0.10 \
--subnet-range ${neutron_subnet_cidr} \
--network ${neutron_network_name} \
${neutron_subnet_name}
fi
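
The script above is idempotent: the network is created only when missing, and the subnet only when no subnet with the expected name is attached, so the job can be re-run safely. A minimal manual check of the result, assuming the default names from values.yaml further down (network and subnet "baremetal" on provider network "ironic") and admin credentials in the environment:

openstack network show baremetal -f value -c id -c provider:physical_network
openstack subnet show baremetal -f value -c cidr -c allocation_pools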

View File

@ -0,0 +1,25 @@
#!/bin/bash
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
set -ex
IRONIC_NEUTRON_CLEANING_NET_ID=$(openstack network show ${neutron_network_name} -f value -c id)
tee /tmp/pod-shared/cleaning-network.conf <<EOF
[neutron]
cleaning_network_uuid = ${IRONIC_NEUTRON_CLEANING_NET_ID}
EOF
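
A quick way to confirm the init container wrote the snippet consumed by the api and conductor entrypoints; a sketch, assuming the chart is deployed into the openstack namespace as in the gate script below (the pod name is illustrative):

kubectl -n openstack exec <ironic-api-pod> -c ironic-api -- cat /tmp/pod-shared/cleaning-network.conf
# Expected shape of the output:
# [neutron]
# cleaning_network_uuid = <uuid of the cleaning network>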

View File

@ -26,6 +26,10 @@ data:
bootstrap.sh: |+
{{ tuple "bin/_bootstrap.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}
manage-cleaning-network.sh: |
{{ tuple "bin/_manage-cleaning-network.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
retreive-cleaning-network.sh: |
{{ tuple "bin/_retreive-cleaning-network.sh.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
db-init.py: |
{{- include "helm-toolkit.scripts.db_init" . | indent 4 }}
db-sync.sh: |

View File

@ -47,6 +47,24 @@ spec:
terminationGracePeriodSeconds: {{ .Values.pod.lifecycle.termination_grace_period.api.timeout | default "30" }}
initContainers:
{{ tuple $envAll $dependencies $mounts_ironic_api_init | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
- name: ironic-retrive-cleaning-net
image: {{ .Values.images.tags.retrive_cleaning_network }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.conductor | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
{{- with $env := dict "ksUserSecret" .Values.secrets.identity.ironic }}
{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
{{- end }}
{{ include "helm-toolkit.utils.to_k8s_env_vars" $envAll.Values.network.pxe | indent 12 }}
command:
- /tmp/retreive-cleaning-network.sh
volumeMounts:
- name: ironic-bin
mountPath: /tmp/retreive-cleaning-network.sh
subPath: retreive-cleaning-network.sh
readOnly: true
- name: pod-shared
mountPath: /tmp/pod-shared
containers:
- name: ironic-api
image: {{ .Values.images.tags.ironic_api }}
@ -79,6 +97,8 @@ spec:
mountPath: /etc/ironic/policy.json
subPath: policy.json
readOnly: true
- name: pod-shared
mountPath: /tmp/pod-shared
{{- if $mounts_ironic_api.volumeMounts }}{{ toYaml $mounts_ironic_api.volumeMounts | indent 12 }}{{ end }}
volumes:
- name: ironic-bin
@ -89,5 +109,7 @@ spec:
configMap:
name: ironic-etc
defaultMode: 0444
- name: pod-shared
emptyDir: {}
{{- if $mounts_ironic_api.volumes }}{{ toYaml $mounts_ironic_api.volumes | indent 8 }}{{ end }}
{{- end }}
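
If the api pod hangs in Init, the new retrieval init container is the first place to look; a sketch, assuming the openstack namespace used by the gate scripts:

kubectl -n openstack logs deployment/ironic-api -c ironic-retrive-cleaning-net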

View File

@ -0,0 +1,62 @@
{{/*
Copyright 2017 The Openstack-Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/}}
{{- if .Values.manifests.job_manage_cleaning_network }}
{{- $envAll := . }}
{{- $dependencies := .Values.dependencies.manage_cleaning_network }}
{{- $serviceAccountName := "ironic-manage-cleaning-network" }}
{{ tuple $envAll $dependencies $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: ironic-manage-cleaning-network
spec:
template:
metadata:
labels:
{{ tuple $envAll "ironic" "manage-cleaning-network" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
spec:
serviceAccountName: {{ $serviceAccountName }}
restartPolicy: OnFailure
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
initContainers:
{{ tuple $envAll $dependencies list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
containers:
- name: ironic-manage-cleaning-network
image: {{ .Values.images.tags.manage_cleaning_network }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.jobs.manage_cleaning_network | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
{{- with $env := dict "ksUserSecret" .Values.secrets.identity.ironic }}
{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
{{- end }}
{{ include "helm-toolkit.utils.to_k8s_env_vars" $envAll.Values.network.pxe | indent 12 }}
command:
- /tmp/manage-cleaning-network.sh
volumeMounts:
- name: ironic-bin
mountPath: /tmp/manage-cleaning-network.sh
subPath: manage-cleaning-network.sh
readOnly: true
volumes:
- name: ironic-bin
configMap:
name: ironic-bin
defaultMode: 0555
{{- end }}
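
A sketch of confirming the job completed (the api and conductor pods list it as a job dependency, so they will not start until it has), again assuming the openstack namespace:

kubectl -n openstack get job ironic-manage-cleaning-network
kubectl -n openstack logs job/ironic-manage-cleaning-network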

View File

@ -67,7 +67,7 @@ spec:
{{ tuple $envAll $envAll.Values.pod.resources.conductor | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: PROVISIONER_INTERFACE
value: {{ .Values.network.interface.provisioner }}
value: {{ .Values.network.pxe.device }}
command:
- /tmp/ironic-conductor-init.sh
volumeMounts:
@ -83,7 +83,7 @@ spec:
{{ tuple $envAll $envAll.Values.pod.resources.conductor | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
- name: PROVISIONER_INTERFACE
value: {{ .Values.network.interface.provisioner }}
value: {{ .Values.network.pxe.device }}
command:
- /tmp/ironic-conductor-http-init.sh
volumeMounts:
@ -97,6 +97,24 @@ spec:
readOnly: true
- name: pod-shared
mountPath: /tmp/pod-shared
- name: ironic-retrive-cleaning-net
image: {{ .Values.images.tags.retrive_cleaning_network }}
imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.conductor | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
env:
{{- with $env := dict "ksUserSecret" .Values.secrets.identity.ironic }}
{{- include "helm-toolkit.snippets.keystone_openrc_env_vars" $env | indent 12 }}
{{- end }}
{{ include "helm-toolkit.utils.to_k8s_env_vars" $envAll.Values.network.pxe | indent 12 }}
command:
- /tmp/retreive-cleaning-network.sh
volumeMounts:
- name: ironic-bin
mountPath: /tmp/retreive-cleaning-network.sh
subPath: retreive-cleaning-network.sh
readOnly: true
- name: pod-shared
mountPath: /tmp/pod-shared
containers:
- name: ironic-conductor
image: {{ .Values.images.tags.ironic_conductor }}
@ -141,7 +159,7 @@ spec:
privileged: true
env:
- name: PROVISIONER_INTERFACE
value: {{ .Values.network.interface.provisioner }}
value: {{ .Values.network.pxe.device }}
command:
- /tmp/ironic-conductor-pxe.sh
volumeMounts:

View File

@ -23,6 +23,8 @@ labels:
images:
tags:
manage_cleaning_network: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
retrive_cleaning_network: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
bootstrap: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
db_init: docker.io/kolla/ubuntu-source-heat-engine:3.0.3
ironic_db_sync: docker.io/kolla/ubuntu-source-ironic-api:3.0.3
@ -88,8 +90,15 @@ conf:
auth_url: null
network:
interface:
provisioner: null
pxe:
device: ironic-pxe
neutron_network_name: baremetal
neutron_subnet_name: baremetal
neutron_provider_network: ironic
neutron_subnet_gateway: 172.24.6.1/24
neutron_subnet_cidr: 172.24.6.0/24
neutron_subnet_alloc_start: 172.24.6.100
neutron_subnet_alloc_end: 172.24.6.200
api:
ingress:
public: true
@ -130,6 +139,10 @@ bootstrap:
)
dependencies:
manage_cleaning_network:
services:
- service: network
endpoint: internal
db_init:
services:
- service: oslo_db
@ -171,6 +184,7 @@ dependencies:
- ironic-db-sync
- ironic-ks-user
- ironic-ks-endpoints
- ironic-manage-cleaning-network
services:
- service: oslo_db
endpoint: internal
@ -181,6 +195,7 @@ dependencies:
- ironic-db-sync
- ironic-ks-user
- ironic-ks-endpoints
- ironic-manage-cleaning-network
services:
- service: oslo_db
endpoint: internal
@ -451,6 +466,7 @@ manifests:
job_ks_endpoints: true
job_ks_service: true
job_ks_user: true
job_manage_cleaning_network: true
pdb_api: true
secret_db: true
secret_keystone: true
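
The new network.pxe values can be overridden at deploy time in the same way the gate script below does; a minimal sketch that simply spells out the chart defaults (adjust the device and provider network to the environment):

tee /tmp/ironic.yaml << EOF
network:
  pxe:
    device: ironic-pxe
    neutron_provider_network: ironic
    neutron_subnet_cidr: 172.24.6.0/24
EOF
helm install ./ironic \
  --namespace=openstack \
  --name=ironic \
  --values=/tmp/ironic.yaml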

View File

@ -25,6 +25,8 @@ make pull-images nova
#NOTE(portdirect): for simplicity we will assume the default route device
# should be used for tunnels
NETWORK_TUNNEL_DEV="$(sudo ip -4 route list 0/0 | awk '{ print $5; exit }')"
OSH_IRONIC_PXE_DEV="ironic-pxe"
OSH_IRONIC_PXE_PYSNET="ironic"
tee /tmp/neutron.yaml << EOF
network:
interface:
@ -57,72 +59,38 @@ conf:
plugins:
ml2_conf:
ml2_type_flat:
flat_networks: public,physnet2
flat_networks: public,${OSH_IRONIC_PXE_PYSNET}
openvswitch_agent:
agent:
tunnel_types: vxlan
ovs:
bridge_mappings: "external:br-ex,physnet2:ironic-pxe"
manifests:
daemonset_dhcp_agent: false
daemonset_metadata_agent: false
daemonset_l3_agent: false
bridge_mappings: "external:br-ex,${OSH_IRONIC_PXE_PYSNET}:${OSH_IRONIC_PXE_DEV}"
EOF
helm install ./neutron \
--namespace=openstack \
--name=neutron \
--values=/tmp/neutron.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
#NOTE: Validate Deployment info
export OS_CLOUD=openstack_helm
openstack service list
sleep 30 #NOTE(portdirect): Wait for ingress controller to update rules and restart Nginx
export OSH_IRONIC_PXE_NET_NAME="${OSH_IRONIC_PXE_NET_NAME:="baremetal"}"
IRONIC_NEUTRON_CLEANING_NET_ID=$(openstack network create -f value -c id --share --provider-network-type flat \
--provider-physical-network physnet2 ${OSH_IRONIC_PXE_NET_NAME})
export OSH_IRONIC_PXE_DEV=${OSH_IRONIC_PXE_DEV:="ironic-pxe"}
export OSH_IRONIC_PXE_ADDR="${OSH_IRONIC_PXE_ADDR:="172.24.6.1/24"}"
export OSH_IRONIC_PXE_SUBNET="${OSH_IRONIC_PXE_SUBNET:="172.24.6.0/24"}"
export OSH_IRONIC_PXE_ALOC_START="${OSH_IRONIC_PXE_ALOC_START:="172.24.6.100"}"
export OSH_IRONIC_PXE_ALOC_END="${OSH_IRONIC_PXE_ALOC_END:="172.24.6.200"}"
export OSH_IRONIC_PXE_SUBNET_NAME="${OSH_IRONIC_PXE_SUBNET_NAME:="baremetal"}"
openstack subnet create \
--gateway ${OSH_IRONIC_PXE_ADDR%/*} \
--allocation-pool start=${OSH_IRONIC_PXE_ALOC_START},end=${OSH_IRONIC_PXE_ALOC_END} \
--dns-nameserver $(kubectl get -n kube-system svc kube-dns -o json | jq -r '.spec.clusterIP') \
--subnet-range ${OSH_IRONIC_PXE_SUBNET} \
--network ${OSH_IRONIC_PXE_NET_NAME} \
${OSH_IRONIC_PXE_SUBNET_NAME}
tee /tmp/ironic.yaml << EOF
labels:
node_selector_key: openstack-helm-node-class
node_selector_value: primary
network:
interface:
provisioner: "${OSH_IRONIC_PXE_DEV}"
pxe:
device: "${OSH_IRONIC_PXE_DEV}"
neutron_provider_network: "${OSH_IRONIC_PXE_PYSNET}"
conf:
ironic:
conductor:
automated_clean: "false"
deploy:
shred_final_overwrite_with_zeros: "false"
neutron:
cleaning_network_uuid: "${IRONIC_NEUTRON_CLEANING_NET_ID}"
EOF
helm install ./ironic \
--namespace=openstack \
--name=ironic \
--values=/tmp/ironic.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
tee /tmp/nova.yaml << EOF
labels:
agent:
@ -153,12 +121,6 @@ helm install ./nova \
--name=nova \
--values=/tmp/nova.yaml
helm upgrade neutron ./neutron \
--values=/tmp/neutron.yaml \
--set=manifests.daemonset_dhcp_agent=true \
--set=manifests.daemonset_metadata_agent=true \
--set=manifests.daemonset_l3_agent=true
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
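
Once neutron is up with the mapping above, the cleaning network created by the ironic chart's job should land on the "ironic" physnet; a sanity-check sketch using the same client setup as this script:

export OS_CLOUD=openstack_helm
openstack network show baremetal -f value -c provider:physical_network  # expect: ironic
openstack subnet list --network baremetal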

View File

@ -30,7 +30,7 @@ while read NODE_DETAIL_RAW; do
NODE_DETAIL=($(echo ${NODE_DETAIL_RAW}))
NODE_BMC_IP=${NODE_DETAIL[0]}
NODE_MAC=${NODE_DETAIL[1]}
if ! [ "x${MASTER_IP}" == "x${NODE_BMC_IP}" ]; then
if [ "$(kubectl get node -o name | wc -l)" -eq "1" ] || [ "x${MASTER_IP}" != "x${NODE_BMC_IP}" ]; then
BM_NODE=$(openstack baremetal node create \
--driver agent_ipmitool \
--driver-info ipmi_username=admin \