Nova: Support Cinder Volume Attachment using Ceph Backend
This PS enables Cinder volume attachment for Nova when used with a Ceph backend.

Change-Id: I9772f38fb3a1a9af26bd92ee18a651d3372de64c
This commit is contained in:
  parent 85b6716c49
  commit 0251c099ba
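For context, the keys introduced below are ordinary chart values, so the Ceph volume path can be toggled at install time. A minimal sketch, not part of the commit (chart path and release name are assumptions; the UUID shown is the chart default added below):

    # Sketch: enable Ceph-backed Cinder volume attachment for nova.
    # ceph.enabled, ceph.cinder_user and ceph.secret_uuid are the values this
    # commit wires through to the keyring and libvirt secret scripts.
    helm install --namespace=openstack ./nova --name=nova \
      --set ceph.enabled=true \
      --set ceph.cinder_user=admin \
      --set ceph.secret_uuid=457eb676-33da-42ec-9a8c-9293d545c337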
@@ -19,13 +19,10 @@ limitations under the License.
 set -ex
 export HOME=/tmp
 
-cat <<EOF > /etc/ceph/ceph.client.keyring
-[client.{{ .Values.ceph.cinder_user }}]
-{{- if .Values.ceph.cinder_keyring }}
-  key = {{ .Values.ceph.cinder_keyring }}
-{{- else }}
-  key = $(cat /tmp/client-keyring)
-{{- end }}
-EOF
-
-exit 0
+CEPH_CINDER_KEYRING_FILE="/etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring"
+echo "[client.${CEPH_CINDER_USER}]" > ${CEPH_CINDER_KEYRING_FILE}
+if ! [ -z "${CEPH_CINDER_KEYRING}" ] ; then
+  echo " key = ${CEPH_CINDER_KEYRING}" >> ${CEPH_CINDER_KEYRING_FILE}
+else
+  echo " key = $(cat /tmp/client-keyring)" >> ${CEPH_CINDER_KEYRING_FILE}
+fi
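For illustration, with CEPH_CINDER_USER=cinder and a cephx key supplied via CEPH_CINDER_KEYRING, the rewritten script above produces a keyring file along these lines (hypothetical content, placeholder key):

    # /etc/ceph/ceph.client.cinder.keyring
    [client.cinder]
     key = <base64 cephx key>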
@@ -16,6 +16,13 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */}}
 
+set -x
+LIBVIRT_SECRET_DEF=$(mktemp --suffix .xml)
+function cleanup {
+  rm -f ${LIBVIRT_SECRET_DEF}
+}
+trap cleanup EXIT
+
 set -ex
 # Wait for the libvirtd is up
 TIMEOUT=60
@@ -24,20 +31,28 @@ while [[ ! -f /var/run/libvirtd.pid ]]; do
     let TIMEOUT-=1
     sleep 1
   else
+    echo "ERROR: Libvirt did not start in time"
     exit 1
   fi
 done
 
-cat > /tmp/secret.xml <<EOF
+if [ -z "${LIBVIRT_CEPH_SECRET_UUID}" ] ; then
+  echo "ERROR: No Libvirt Secret UUID Supplied"
+  exit 1
+fi
+
+if [ -z "${CEPH_CINDER_KEYRING}" ] ; then
+  CEPH_CINDER_KEYRING=$(sed -n 's/^[[:space:]]*key[[:blank:]]\+=[[:space:]]\(.*\)/\1/p' /etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring)
+fi
+
+cat > ${LIBVIRT_SECRET_DEF} <<EOF
 <secret ephemeral='no' private='no'>
-  <uuid>{{ .Values.ceph.secret_uuid }}</uuid>
+  <uuid>${LIBVIRT_CEPH_SECRET_UUID}</uuid>
   <usage type='ceph'>
-    <name>client.{{ .Values.ceph.cinder_user }} secret</name>
+    <name>client.${CEPH_CINDER_USER}. secret</name>
   </usage>
 </secret>
 EOF
 
-virsh secret-define --file /tmp/secret.xml
-virsh secret-set-value --secret {{ .Values.ceph.secret_uuid }} --base64 {{ .Values.ceph.cinder_keyring }}
-
-rm /tmp/secret.xml
+virsh secret-define --file ${LIBVIRT_SECRET_DEF}
+virsh secret-set-value --secret "${LIBVIRT_CEPH_SECRET_UUID}" --base64 "${CEPH_CINDER_KEYRING}"
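Once the post-start hook runs this script, the secret libvirt will use for RBD attachments can be checked from the nova-libvirt container; a small verification sketch (not part of the commit):

    # List defined secrets and read back the cephx key for the configured UUID.
    virsh secret-list
    virsh secret-get-value ${LIBVIRT_CEPH_SECRET_UUID}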
@@ -47,6 +47,15 @@ spec:
         - name: ceph-keyring-placement
           image: {{ .Values.images.compute }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
+          env:
+            - name: CEPH_CINDER_USER
+              value: "{{ .Values.ceph.cinder_user }}"
+            {{- if .Values.ceph.cinder_keyring }}
+            - name: CEPH_CINDER_KEYRING
+              value: "{{ .Values.ceph.cinder_keyring }}"
+            {{ end }}
+            - name: LIBVIRT_CEPH_SECRET_UUID
+              value: "{{ .Values.ceph.secret_uuid }}"
           command:
             - /tmp/ceph-keyring.sh
           volumeMounts:

@@ -85,10 +94,6 @@ spec:
               mountPath: /etc/ceph/ceph.conf
               subPath: ceph.conf
               readOnly: true
-            - name: nova-etc
-              mountPath: /etc/ceph/ceph.client.keyring
-              subPath: ceph.client.keyring
-              readOnly: true
             - mountPath: /lib/modules
               name: libmodules
               readOnly: true

@@ -109,6 +114,17 @@ spec:
 {{ tuple $envAll $envAll.Values.pod.resources.compute | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
           securityContext:
             privileged: true
+          {{- if .Values.ceph.enabled }}
+          env:
+            - name: CEPH_CINDER_USER
+              value: "{{ .Values.ceph.cinder_user }}"
+            {{- if .Values.ceph.cinder_keyring }}
+            - name: CEPH_CINDER_KEYRING
+              value: "{{ .Values.ceph.cinder_keyring }}"
+            {{ end }}
+            - name: LIBVIRT_CEPH_SECRET_UUID
+              value: "{{ .Values.ceph.secret_uuid }}"
+          {{ end }}
           command:
             - /tmp/nova-compute.sh
           volumeMounts:
@@ -47,6 +47,15 @@ spec:
         - name: ceph-keyring-placement
           image: {{ .Values.images.libvirt }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
+          env:
+            - name: CEPH_CINDER_USER
+              value: "{{ .Values.ceph.cinder_user }}"
+            {{- if .Values.ceph.cinder_keyring }}
+            - name: CEPH_CINDER_KEYRING
+              value: "{{ .Values.ceph.cinder_keyring }}"
+            {{ end }}
+            - name: LIBVIRT_CEPH_SECRET_UUID
+              value: "{{ .Values.ceph.secret_uuid }}"
           command:
             - /tmp/ceph-keyring.sh
           volumeMounts:

@@ -65,6 +74,20 @@ spec:
         - name: nova-libvirt
           image: {{ .Values.images.libvirt }}
           imagePullPolicy: {{ .Values.images.pull_policy }}
+{{ tuple $envAll $envAll.Values.pod.resources.libvirt | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
+          securityContext:
+            privileged: true
+          {{- if .Values.ceph.enabled }}
+          env:
+            - name: CEPH_CINDER_USER
+              value: "{{ .Values.ceph.cinder_user }}"
+            {{- if .Values.ceph.cinder_keyring }}
+            - name: CEPH_CINDER_KEYRING
+              value: "{{ .Values.ceph.cinder_keyring }}"
+            {{ end }}
+            - name: LIBVIRT_CEPH_SECRET_UUID
+              value: "{{ .Values.ceph.secret_uuid }}"
+          {{ end }}
           {{- if .Values.ceph.enabled }}
           lifecycle:
             postStart:

@@ -72,9 +95,6 @@ spec:
                 command:
                   - /tmp/ceph-secret-define.sh
           {{- end }}
-{{ tuple $envAll $envAll.Values.pod.resources.libvirt | include "helm-toolkit.snippets.kubernetes_resources" | indent 10 }}
-          securityContext:
-            privileged: true
           command:
             - /tmp/libvirt.sh
           volumeMounts:
@@ -130,12 +130,11 @@ network:
     targetPort: 6080
 
 ceph:
-  enabled: false
+  enabled: true
   monitors: []
-  cinder_user: "cinder"
+  cinder_user: "admin"
   cinder_keyring: null
-  nova_pool: "vms"
-  secret_uuid: ""
+  secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
 
 libvirt:
   listen_addr: 0.0.0.0

@@ -372,7 +371,6 @@ conf:
       auth_type: password
       auth_version: v3
       memcache_security_strategy: ENCRYPT
-
     libvirt:
       nova:
         conf:

@@ -380,8 +378,8 @@ conf:
           images_type: qcow2
           images_rbd_pool: vms
           images_rbd_ceph_conf: /etc/ceph/ceph.conf
-          rbd_user: cinder
-          rbd_secret_uuid: null
+          rbd_user: admin
+          rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
           disk_cachemodes: "network=writeback"
           hw_disk_discard: unmap
     upgrade_levels:
@@ -542,30 +542,7 @@ data:
       - application: nova
       - component: ks-user
      - release_group: osh-nova
-    values:
-      ceph:
-        enabled: false
-      conf:
-        nova:
-          default:
-            oslo:
-              log:
-                debug: false
-          libvirt:
-            nova:
-              conf:
-                virt_type: qemu
-                images_type: null
-                images_rbd_pool: null
-                images_rbd_ceph_conf: null
-                rbd_user: null
-                rbd_secret_uuid: null
-                disk_cachemodes: null
-                hw_disk_discard: null
-          upgrade_levels:
-            nova:
-              conf:
-                compute: null
+    values: {}
     source:
       type: local
       location: /opt/openstack-helm/charts
@@ -535,30 +535,7 @@ data:
      - application: nova
      - component: ks-user
      - release_group: osh-nova
-    values:
-      ceph:
-        enabled: false
-      conf:
-        nova:
-          default:
-            oslo:
-              log:
-                debug: false
-          libvirt:
-            nova:
-              conf:
-                virt_type: qemu
-                images_type: null
-                images_rbd_pool: null
-                images_rbd_ceph_conf: null
-                rbd_user: null
-                rbd_secret_uuid: null
-                disk_cachemodes: null
-                hw_disk_discard: null
-          upgrade_levels:
-            nova:
-              conf:
-                compute: null
+    values: {}
     source:
       type: local
       location: /opt/openstack-helm/charts
@@ -118,3 +118,24 @@ function openstack_wait_for_stack {
   done
   set -x
 }
+
+function openstack_wait_for_volume {
+  # Default wait timeout is 180 seconds
+  set +x
+  end=$(date +%s)
+  if ! [ -z $3 ]; then
+    end=$((end + $3))
+  else
+    end=$((end + 180))
+  fi
+  while true; do
+    STATUS=$($OPENSTACK volume show $1 -f value -c status)
+    [ $STATUS == "$2" ] && \
+      break || true
+    sleep 1
+    now=$(date +%s)
+    [ $now -gt $end ] && echo "Volume did not become $2 in time." && \
+      $OPENSTACK volume show $1 && exit -1
+  done
+  set -x
+}
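A usage sketch for the helper added above (the volume name and 120-second timeout are illustrative; $OPENSTACK is the CLI alias already used by these gate functions):

    # Block until the test volume reports the requested status, or fail after 120s.
    openstack_wait_for_volume osh-volume available 120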
tools/gate/funcs/python-data-to-json.py (new executable file, 17 lines)
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+import json
+import sys
+
+def dump(s):
+    print json.dumps(eval(s))
+
+def main(args):
+    if not args:
+        dump(''.join(sys.stdin.readlines()))
+    else:
+        for arg in args:
+            dump(''.join(open(arg, 'r').readlines()))
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv[1:]))
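This helper turns the Python-literal attachment data printed by the openstack CLI into JSON so jq can consume it. An illustrative invocation with made-up input (not part of the commit):

    echo "[{'device': '/dev/vdb', 'server_id': 'dummy'}]" \
      | tools/gate/funcs/python-data-to-json.py \
      | jq -r '.[] | .device'
    # prints: /dev/vdb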
@@ -73,14 +73,22 @@ kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
 helm install --namespace=openstack ${WORK_DIR}/keystone --name=keystone
 if [ "x$PVC_BACKEND" == "xceph" ]; then
   helm install --namespace=openstack ${WORK_DIR}/glance --name=glance
+  helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder
 else
   helm install --namespace=openstack ${WORK_DIR}/glance --name=glance \
     --values=${WORK_DIR}/tools/overrides/mvp/glance.yaml
+  helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder \
+    --values=${WORK_DIR}/tools/overrides/mvp/cinder.yaml
 fi
 kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
-helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
+if [ "x$PVC_BACKEND" == "xceph" ]; then
+  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
+    --set=conf.nova.libvirt.nova.conf.virt_type=qemu
+else
+  helm install --namespace=openstack ${WORK_DIR}/nova --name=nova \
   --values=${WORK_DIR}/tools/overrides/mvp/nova.yaml \
   --set=conf.nova.libvirt.nova.conf.virt_type=qemu
+fi
 helm install --namespace=openstack ${WORK_DIR}/neutron --name=neutron \
   --values=${WORK_DIR}/tools/overrides/mvp/neutron.yaml
 kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}

@@ -89,12 +97,6 @@ helm install --namespace=openstack ${WORK_DIR}/heat --name=heat
 kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
 
 if [ "x$INTEGRATION" == "xmulti" ]; then
-  if [ "x$PVC_BACKEND" == "xceph" ]; then
-    helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder
-  else
-    helm install --namespace=openstack ${WORK_DIR}/cinder --name=cinder \
-      --values=${WORK_DIR}/tools/overrides/mvp/cinder.yaml
-  fi
   helm install --namespace=openstack ${WORK_DIR}/horizon --name=horizon
   kube_wait_for_pods openstack ${SERVICE_LAUNCH_TIMEOUT}
 
@@ -87,5 +87,28 @@ ssh -i ${KEYPAIR_LOC} cirros@${FLOATING_IP} curl -sSL 169.254.169.254
 # Bonus round - display a Unicorn
 ssh -i ${KEYPAIR_LOC} cirros@${FLOATING_IP} curl http://artscene.textfiles.com/asciiart/unicorn || true
 
+
+if $OPENSTACK service list -f value -c Type | grep -q volume; then
+  $OPENSTACK volume create \
+    --size ${OSH_VOL_SIZE_CLI} \
+    --type ${OSH_VOL_TYPE_CLI} \
+    ${OSH_VOL_NAME_CLI}
+  openstack_wait_for_volume ${OSH_VOL_NAME_CLI} available ${SERVICE_TEST_TIMEOUT}
+
+  $OPENSTACK server add volume ${OSH_VM_NAME_CLI} ${OSH_VOL_NAME_CLI}
+  openstack_wait_for_volume ${OSH_VOL_NAME_CLI} in-use ${SERVICE_TEST_TIMEOUT}
+
+  VOL_DEV=$($OPENSTACK volume show ${OSH_VOL_NAME_CLI} \
+    -f value -c attachments | \
+    ${WORK_DIR}/tools/gate/funcs/python-data-to-json.py | \
+    jq -r '.[] | .device')
+  ssh -i ${KEYPAIR_LOC} cirros@${FLOATING_IP} sudo /usr/sbin/mkfs.ext4 ${VOL_DEV}
+
+  $OPENSTACK server remove volume ${OSH_VM_NAME_CLI} ${OSH_VOL_NAME_CLI}
+  openstack_wait_for_volume ${OSH_VOL_NAME_CLI} available ${SERVICE_TEST_TIMEOUT}
+
+  $OPENSTACK volume delete ${OSH_VOL_NAME_CLI}
+fi
+
 # Remove the test vm
 $NOVA delete ${OSH_VM_NAME_CLI}
@@ -71,6 +71,9 @@ export OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX=${OSH_PRIVATE_SUBNET_POOL_DEF_PREFIX:=
 export OSH_VM_FLAVOR=${OSH_VM_FLAVOR:="m1.tiny"}
 export OSH_VM_NAME_CLI=${OSH_VM_NAME_CLI:="osh-smoketest"}
 export OSH_VM_KEY_CLI=${OSH_VM_KEY_CLI:="osh-smoketest-key"}
+export OSH_VOL_NAME_CLI=${OSH_VOL_NAME_CLI:="osh-volume"}
+export OSH_VOL_SIZE_CLI=${OSH_VOL_SIZE_CLI:="1"}
+export OSH_VOL_TYPE_CLI=${OSH_VOL_TYPE_CLI:="rbd1"}
 export OSH_PUB_NET_STACK=${OSH_PUB_NET_STACK:="heat-public-net-deployment"}
 export OSH_SUBNET_POOL_STACK=${OSH_SUBNET_POOL_STACK:="heat-subnet-pool-deployment"}
 export OSH_BASIC_VM_STACK=${OSH_BASIC_VM_STACK:="heat-basic-vm-deployment"}