Nova: Support Cinder Volume Attachment using Ceph Backend
This PS enables Cinder volume attachment for Nova when used with a Ceph backend.

Change-Id: I9772f38fb3a1a9af26bd92ee18a651d3372de64c
parent 85b6716c49
commit 0251c099ba
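Everything below is driven by the chart values introduced further down (ceph.enabled, ceph.cinder_user, ceph.secret_uuid and, optionally, ceph.cinder_keyring). As a rough sketch of enabling the feature at install time, only the value keys come from this change and the keyring string is a placeholder:

# Illustrative only: release name, namespace and key are placeholders.
helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
  --set=ceph.enabled=true \
  --set=ceph.cinder_user=admin \
  --set=ceph.secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337 \
  --set=ceph.cinder_keyring=<placeholder-base64-key>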
@@ -19,13 +19,10 @@ limitations under the License.
set -ex
export HOME=/tmp

cat <<EOF > /etc/ceph/ceph.client.keyring
[client.{{ .Values.ceph.cinder_user }}]
{{- if .Values.ceph.cinder_keyring }}
  key = {{ .Values.ceph.cinder_keyring }}
{{- else }}
  key = $(cat /tmp/client-keyring)
{{- end }}
EOF

exit 0
CEPH_CINDER_KEYRING_FILE="/etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring"
echo "[client.${CEPH_CINDER_USER}]" > ${CEPH_CINDER_KEYRING_FILE}
if ! [ -z "${CEPH_CINDER_KEYRING}" ] ; then
  echo "  key = ${CEPH_CINDER_KEYRING}" >> ${CEPH_CINDER_KEYRING_FILE}
else
  echo "  key = $(cat /tmp/client-keyring)" >> ${CEPH_CINDER_KEYRING_FILE}
fi
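As a sanity check (not part of this change), the keyring the script writes can be exercised directly against the monitors; the user and path below are the ones the script itself uses, and ceph.conf is assumed to be at its default location:

# Should return cluster health if the key written above is valid.
ceph -n client.${CEPH_CINDER_USER} \
  --keyring "/etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring" \
  health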
@@ -16,28 +16,43 @@ See the License for the specific language governing permissions and
limitations under the License.
*/}}

set -x
LIBVIRT_SECRET_DEF=$(mktemp --suffix .xml)
function cleanup {
  rm -f ${LIBVIRT_SECRET_DEF}
}
trap cleanup EXIT

set -ex
# Wait for libvirtd to come up
TIMEOUT=60
while [[ ! -f /var/run/libvirtd.pid ]]; do
  if [[ ${TIMEOUT} -gt 0 ]]; then
    let TIMEOUT-=1
    sleep 1
  else
    exit 1
  fi
  if [[ ${TIMEOUT} -gt 0 ]]; then
    let TIMEOUT-=1
    sleep 1
  else
    echo "ERROR: Libvirt did not start in time"
    exit 1
  fi
done

cat > /tmp/secret.xml <<EOF
if [ -z "${LIBVIRT_CEPH_SECRET_UUID}" ] ; then
  echo "ERROR: No Libvirt Secret UUID Supplied"
  exit 1
fi

if [ -z "${CEPH_CINDER_KEYRING}" ] ; then
  CEPH_CINDER_KEYRING=$(sed -n 's/^[[:space:]]*key[[:blank:]]\+=[[:space:]]\(.*\)/\1/p' /etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring)
fi

cat > ${LIBVIRT_SECRET_DEF} <<EOF
<secret ephemeral='no' private='no'>
  <uuid>{{ .Values.ceph.secret_uuid }}</uuid>
  <uuid>${LIBVIRT_CEPH_SECRET_UUID}</uuid>
  <usage type='ceph'>
    <name>client.{{ .Values.ceph.cinder_user }} secret</name>
    <name>client.${CEPH_CINDER_USER}. secret</name>
  </usage>
</secret>
EOF

virsh secret-define --file /tmp/secret.xml
virsh secret-set-value --secret {{ .Values.ceph.secret_uuid }} --base64 {{ .Values.ceph.cinder_keyring }}

rm /tmp/secret.xml
virsh secret-define --file ${LIBVIRT_SECRET_DEF}
virsh secret-set-value --secret "${LIBVIRT_CEPH_SECRET_UUID}" --base64 "${CEPH_CINDER_KEYRING}"
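To confirm the secret actually landed in libvirt once this runs as the postStart hook, a quick check inside the libvirt container (illustrative only; the UUID is the same ${LIBVIRT_CEPH_SECRET_UUID} used above):

# List defined secrets and read the Ceph key back out of libvirt.
virsh secret-list
virsh secret-get-value "${LIBVIRT_CEPH_SECRET_UUID}"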
@@ -47,6 +47,15 @@ spec:
- name: ceph-keyring-placement
  image: {{ .Values.images.compute }}
  imagePullPolicy: {{ .Values.images.pull_policy }}
  env:
    - name: CEPH_CINDER_USER
      value: "{{ .Values.ceph.cinder_user }}"
{{- if .Values.ceph.cinder_keyring }}
    - name: CEPH_CINDER_KEYRING
      value: "{{ .Values.ceph.cinder_keyring }}"
{{ end }}
    - name: LIBVIRT_CEPH_SECRET_UUID
      value: "{{ .Values.ceph.secret_uuid }}"
  command:
    - /tmp/ceph-keyring.sh
  volumeMounts:
@@ -85,10 +94,6 @@ spec:
  mountPath: /etc/ceph/ceph.conf
  subPath: ceph.conf
  readOnly: true
- name: nova-etc
  mountPath: /etc/ceph/ceph.client.keyring
  subPath: ceph.client.keyring
  readOnly: true
- mountPath: /lib/modules
  name: libmodules
  readOnly: true
@@ -109,6 +114,17 @@ spec:
{{ tuple $envAll $envAll.Values.pod.resources.compute | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
  securityContext:
    privileged: true
{{- if .Values.ceph.enabled }}
  env:
    - name: CEPH_CINDER_USER
      value: "{{ .Values.ceph.cinder_user }}"
{{- if .Values.ceph.cinder_keyring }}
    - name: CEPH_CINDER_KEYRING
      value: "{{ .Values.ceph.cinder_keyring }}"
{{ end }}
    - name: LIBVIRT_CEPH_SECRET_UUID
      value: "{{ .Values.ceph.secret_uuid }}"
{{ end }}
  command:
    - /tmp/nova-compute.sh
  volumeMounts:
@@ -47,6 +47,15 @@ spec:
- name: ceph-keyring-placement
  image: {{ .Values.images.libvirt }}
  imagePullPolicy: {{ .Values.images.pull_policy }}
  env:
    - name: CEPH_CINDER_USER
      value: "{{ .Values.ceph.cinder_user }}"
{{- if .Values.ceph.cinder_keyring }}
    - name: CEPH_CINDER_KEYRING
      value: "{{ .Values.ceph.cinder_keyring }}"
{{ end }}
    - name: LIBVIRT_CEPH_SECRET_UUID
      value: "{{ .Values.ceph.secret_uuid }}"
  command:
    - /tmp/ceph-keyring.sh
  volumeMounts:
@@ -65,6 +74,20 @@ spec:
- name: nova-libvirt
  image: {{ .Values.images.libvirt }}
  imagePullPolicy: {{ .Values.images.pull_policy }}
{{ tuple $envAll $envAll.Values.pod.resources.libvirt | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
  securityContext:
    privileged: true
{{- if .Values.ceph.enabled }}
  env:
    - name: CEPH_CINDER_USER
      value: "{{ .Values.ceph.cinder_user }}"
{{- if .Values.ceph.cinder_keyring }}
    - name: CEPH_CINDER_KEYRING
      value: "{{ .Values.ceph.cinder_keyring }}"
{{ end }}
    - name: LIBVIRT_CEPH_SECRET_UUID
      value: "{{ .Values.ceph.secret_uuid }}"
{{ end }}
{{- if .Values.ceph.enabled }}
  lifecycle:
    postStart:
@@ -72,9 +95,6 @@ spec:
      command:
        - /tmp/ceph-secret-define.sh
{{- end }}
{{ tuple $envAll $envAll.Values.pod.resources.libvirt | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
  securityContext:
    privileged: true
  command:
    - /tmp/libvirt.sh
  volumeMounts:
@@ -130,12 +130,11 @@ network:
    targetPort: 6080

ceph:
  enabled: false
  enabled: true
  monitors: []
  cinder_user: "cinder"
  cinder_user: "admin"
  cinder_keyring: null
  nova_pool: "vms"
  secret_uuid: ""
  secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337

libvirt:
  listen_addr: 0.0.0.0
@@ -372,7 +371,6 @@ conf:
      auth_type: password
      auth_version: v3
      memcache_security_strategy: ENCRYPT

    libvirt:
      nova:
        conf:
@@ -380,8 +378,8 @@ conf:
          images_type: qcow2
          images_rbd_pool: vms
          images_rbd_ceph_conf: /etc/ceph/ceph.conf
          rbd_user: cinder
          rbd_secret_uuid: null
          rbd_user: admin
          rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
          disk_cachemodes: "network=writeback"
          hw_disk_discard: unmap
    upgrade_levels:
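With the defaults above, the rendered nova.conf on a compute node is expected to carry rbd_user=admin, rbd_secret_uuid matching ceph.secret_uuid, and the vms pool in its [libvirt] section. A rough way to eyeball this, assuming the usual /etc/nova/nova.conf path inside the nova-compute container:

# Print the start of the [libvirt] section of the rendered config.
grep -A8 '^\[libvirt\]' /etc/nova/nova.conf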
@@ -542,30 +542,7 @@ data:
        - application: nova
        - component: ks-user
        - release_group: osh-nova
    values:
      ceph:
        enabled: false
      conf:
        nova:
          default:
            oslo:
              log:
                debug: false
          libvirt:
            nova:
              conf:
                virt_type: qemu
                images_type: null
                images_rbd_pool: null
                images_rbd_ceph_conf: null
                rbd_user: null
                rbd_secret_uuid: null
                disk_cachemodes: null
                hw_disk_discard: null
          upgrade_levels:
            nova:
              conf:
                compute: null
    values: {}
    source:
      type: local
      location: /opt/openstack-helm/charts
@@ -535,30 +535,7 @@ data:
        - application: nova
        - component: ks-user
        - release_group: osh-nova
    values:
      ceph:
        enabled: false
      conf:
        nova:
          default:
            oslo:
              log:
                debug: false
          libvirt:
            nova:
              conf:
                virt_type: qemu
                images_type: null
                images_rbd_pool: null
                images_rbd_ceph_conf: null
                rbd_user: null
                rbd_secret_uuid: null
                disk_cachemodes: null
                hw_disk_discard: null
          upgrade_levels:
            nova:
              conf:
                compute: null
    values: {}
    source:
      type: local
      location: /opt/openstack-helm/charts
@@ -118,3 +118,24 @@ function openstack_wait_for_stack {
  done
  set -x
}

function openstack_wait_for_volume {
  # Default wait timeout is 180 seconds
  set +x
  end=$(date +%s)
  if ! [ -z $3 ]; then
    end=$((end + $3))
  else
    end=$((end + 180))
  fi
  while true; do
    STATUS=$($OPENSTACK volume show $1 -f value -c status)
    [ $STATUS == "$2" ] && \
      break || true
    sleep 1
    now=$(date +%s)
    [ $now -gt $end ] && echo "Volume did not become $2 in time." && \
      $OPENSTACK volume show $1 && exit -1
  done
  set -x
}
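Usage mirrors the other wait helpers in this file: the three positional arguments are the volume name, the status to wait for, and an optional timeout in seconds. For example:

# Wait up to 300 seconds for "osh-volume" to become available.
openstack_wait_for_volume osh-volume available 300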
tools/gate/funcs/python-data-to-json.py (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/usr/bin/env python
import json
import sys

def dump(s):
    print json.dumps(eval(s))

def main(args):
    if not args:
        dump(''.join(sys.stdin.readlines()))
    else:
        for arg in args:
            dump(''.join(open(arg, 'r').readlines()))
    return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
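This helper exists because "openstack volume show -f value -c attachments" prints a Python literal rather than JSON; piping it through the script makes the result consumable by jq, as the gate test further down does. A quick illustration with made-up attachment data:

# Convert a Python-literal attachment list to JSON and pull out the device path.
echo "[{'device': u'/dev/vdb', 'server_id': u'00000000-0000-0000-0000-000000000000'}]" | \
  ${WORK_DIR}/tools/gate/funcs/python-data-to-json.py | \
  jq -r '.[] | .device'
# -> /dev/vdb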
@@ -73,14 +73,22 @@ kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
helm install --namespace=openstack ${WORK_DIR}/keystone --name=keystone
if [ "x$PVC_BACKEND" == "xceph" ]; then
  helm install --namespace=openstack ${WORK_DIR}/glance --name=glance
  helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder
else
  helm install --namespace=openstack ${WORK_DIR}/glance --name=glance \
    --values=${WORK_DIR}/tools/overrides/mvp/glance.yaml
  helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder \
    --values=${WORK_DIR}/tools/overrides/mvp/cinder.yaml
fi
kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
  --values=${WORK_DIR}/tools/overrides/mvp/nova.yaml \
  --set=conf.nova.libvirt.nova.conf.virt_type=qemu
if [ "x$PVC_BACKEND" == "xceph" ]; then
  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
    --set=conf.nova.libvirt.nova.conf.virt_type=qemu
else
  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
    --values=${WORK_DIR}/tools/overrides/mvp/nova.yaml \
    --set=conf.nova.libvirt.nova.conf.virt_type=qemu
fi
helm install --namespace=openstack ${WORK_DIR}/neutron --name=neutron \
  --values=${WORK_DIR}/tools/overrides/mvp/neutron.yaml
kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
@@ -89,12 +97,6 @@ helm install --namespace=openstack ${WORK_DIR}/heat --name=heat
kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}

if [ "x$INTEGRATION" == "xmulti" ]; then
  if [ "x$PVC_BACKEND" == "xceph" ]; then
    helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder
  else
    helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder \
      --values=${WORK_DIR}/tools/overrides/mvp/cinder.yaml
  fi
  helm install --namespace=openstack ${WORK_DIR}/horizon --name=horizon
  kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
@@ -87,5 +87,28 @@ ssh -i ${KEYPAIR_LOC} cirros@${FLOATING_IP} curl -sSL 169.254.169.254
# Bonus round - display a Unicorn
ssh -i ${KEYPAIR_LOC} cirros@${FLOATING_IP} curl http://artscene.textfiles.com/asciiart/unicorn || true


if $OPENSTACK service list -f value -c Type | grep -q volume; then
  $OPENSTACK volume create \
    --size ${OSH_VOL_SIZE_CLI} \
    --type ${OSH_VOL_TYPE_CLI} \
    ${OSH_VOL_NAME_CLI}
  openstack_wait_for_volume ${OSH_VOL_NAME_CLI} available ${SERVICE_TEST_TIMEOUT}

  $OPENSTACK server add volume ${OSH_VM_NAME_CLI} ${OSH_VOL_NAME_CLI}
  openstack_wait_for_volume ${OSH_VOL_NAME_CLI} in-use ${SERVICE_TEST_TIMEOUT}

  VOL_DEV=$($OPENSTACK volume show ${OSH_VOL_NAME_CLI} \
    -f value -c attachments | \
    ${WORK_DIR}/tools/gate/funcs/python-data-to-json.py | \
    jq -r '.[] | .device')
  ssh -i ${KEYPAIR_LOC} cirros@${FLOATING_IP} sudo /usr/sbin/mkfs.ext4 ${VOL_DEV}

  $OPENSTACK server remove volume ${OSH_VM_NAME_CLI} ${OSH_VOL_NAME_CLI}
  openstack_wait_for_volume ${OSH_VOL_NAME_CLI} available ${SERVICE_TEST_TIMEOUT}

  $OPENSTACK volume delete ${OSH_VOL_NAME_CLI}
fi

# Remove the test vm
$NOVA delete ${OSH_VM_NAME_CLI}
@@ -71,6 +71,9 @@ export OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX=${OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX:=
export OSH_VM_FLAVOR=${OSH_VM_FLAVOR:="m1.tiny"}
export OSH_VM_NAME_CLI=${OSH_VM_NAME_CLI:="osh-smoketest"}
export OSH_VM_KEY_CLI=${OSH_VM_KEY_CLI:="osh-smoketest-key"}
export OSH_VOL_NAME_CLI=${OSH_VOL_NAME_CLI:="osh-volume"}
export OSH_VOL_SIZE_CLI=${OSH_VOL_SIZE_CLI:="1"}
export OSH_VOL_TYPE_CLI=${OSH_VOL_TYPE_CLI:="rbd1"}
export OSH_PUB_NET_STACK=${OSH_PUB_NET_STACK:="heat-public-net-deployment"}
export OSH_SUBNET_POOL_STACK=${OSH_SUBNET_POOL_STACK:="heat-subnet-pool-deployment"}
export OSH_BASIC_VM_STACK=${OSH_BASIC_VM_STACK:="heat-basic-vm-deployment"}