Merge "External Ceph Tools, Tests, and Docs"
This commit is contained in:
commit
1700e56744
@@ -10,6 +10,63 @@ Overview

We are currently recommending that you deploy ceph using kolla-ansible or
ceph-deploy.

Install steps
=============

This list of instructions is currently incomplete.

Completely remove the following option from /etc/kolla-kubernetes/kolla-kubernetes.yaml:

::

    keyring: /etc/ceph/ceph.client.admin.keyring

Set the user option in the storage_ceph section of
/etc/kolla-kubernetes/kolla-kubernetes.yaml to 'kolla' and the pool option to
'kollavolumes'.
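
The relevant section of /etc/kolla-kubernetes/kolla-kubernetes.yaml would then look
roughly like the following sketch (the monitor address is only an example, and
secretName is assumed to match the ceph-kolla secret created below):

::

    storage_ceph:
      monitors:
        - 172.17.0.1
      user: kolla
      pool: kollavolumes
      secretName: ceph-kolla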

Upload the ceph.conf and admin key generated by the external Ceph deployment
(with orchestration_engine=ANSIBLE):

::

    kubectl create configmap ceph-conf --namespace=kolla \
        --from-file=ceph.conf=/etc/kolla/ceph-osd/ceph.conf
    kubectl create secret generic ceph-client-admin-keyring --namespace=kolla \
        --from-file=data=/etc/kolla/ceph-osd/ceph.client.admin.keyring

Before any PVs are created, do the following:

::

    kollakube res create pod ceph-rbd
    kollakube res create pod ceph-admin
    watch kubectl get pods --namespace=kolla

Wait for ceph-admin to come up.

Create a pool and a user:

::

    #FIXME probably needs a pool per region name?
    str="ceph osd pool create kollavolumes 32"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c \
        "$str" > /tmp/$$
    str="ceph auth get-or-create client.kolla mon 'allow r' osd 'allow "
    str="$str class-read object_prefix rbd_children, allow rwx pool=kollavolumes'"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c \
        "$str" | awk '{if($1 == "key"){print $3}}' > /tmp/$$
    kubectl create secret generic ceph-kolla --namespace=kolla \
        --from-file=key=/tmp/$$
    rm -f /tmp/$$

Create disks for 'rabbitmq' and 'mariadb' like so:

::

    cmd="rbd create --pool kollavolumes --image-feature layering --size 10240"
    cmd="$cmd mariadb; rbd map --pool kollavolumes mariadb; #format it and unmount/unmap..."
    kubectl exec -it ceph-admin -- /bin/bash -xec "$cmd"
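
The comment above leaves the format/unmap step implied; a minimal sketch of that
step, adapted from the gate test script in this change (it assumes the rbd images
already exist and that mkfs.xfs is available inside the ceph-admin container):

::

    for volume in mariadb rabbitmq; do
        str='DEV=$(rbd map --pool kollavolumes '$volume'); mkfs.xfs $DEV;'
        str="$str rbd unmap "'$DEV;'
        kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str"
    done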

Ceph managed by Kolla-Kubernetes
================================


@@ -26,6 +26,7 @@ Install minikube with kvm support

    curl -Lo kubectl \
        http://storage.googleapis.com/kubernetes-release/release/v1.3.6/bin/linux/amd64/kubectl
    chmod +x kubectl
    sudo mv kubectl /usr/local/bin

Start minikube and services
@@ -82,6 +82,7 @@ dns_domain_name: "openstack.kolla"
# Persistent Storage
########################
storage_provider: "host" # host, ceph, gce, aws
storage_provider_fstype: "ext4"
storage_ceph:
  # - WARNING: These sample defaults configure ceph access using the
  #   ceph "admin" user/key, because it involves the least amount of
@@ -229,8 +230,13 @@ enable_resolve_conf_net_host_workaround: "yes"
########################
# WARNING! reminder, this ceph setup is only intended for testing.

ceph_osd_journal_dev: "/dev/loop0p1"
ceph_osd_data_dev: "/dev/loop0p2"
ceph_osd_journal_dev:
  - "/dev/loop0p1"
  - "/dev/loop1p1"

ceph_osd_data_dev:
  - "/dev/loop0p2"
  - "/dev/loop1p2"

########################
# Rabbitmq variables
@@ -39,12 +39,28 @@ kolla-kubernetes:
      - name: ceph-bootstrap-initial-mon
        template: services/ceph/ceph-bootstrap-initial-mon.yml.j2
    pod:
      - name: ceph-bootstrap-osd
      - name: ceph-bootstrap-osd0
        template: services/ceph/ceph-bootstrap-osd.yml.j2
        vars:
          index: '0'
      - name: ceph-bootstrap-osd1
        template: services/ceph/ceph-bootstrap-osd.yml.j2
        vars:
          index: '1'
      - name: ceph-mon
        template: services/ceph/ceph-mon-pod.yml.j2
      - name: ceph-osd
      - name: ceph-osd0
        template: services/ceph/ceph-osd-pod.yml.j2
        vars:
          index: '0'
      - name: ceph-osd1
        template: services/ceph/ceph-osd-pod.yml.j2
        vars:
          index: '1'
      - name: ceph-admin
        template: services/ceph/ceph-admin-pod.yml.j2
      - name: ceph-rbd
        template: services/ceph/ceph-rbd-pod.yml.j2
  - name: mariadb
    pods:
      - name: mariadb
@@ -893,6 +909,36 @@ kolla-kubernetes:
          service_auth: openstack_cinder_auth
          description: Openstack Block Storage
          endpoint: cinder_admin_endpoint
      - name: cinder-create-keystone-endpoint-publicv2
        template: services/common/common-create-keystone-endpoint.yml.j2
        vars:
          service_name: cinder
          service_type: volume
          interface: public
          service_auth: openstack_cinder_auth
          description: Openstack Block Storage
          endpoint: cinder_public_endpoint
          postfix: v2
      - name: cinder-create-keystone-endpoint-internalv2
        template: services/common/common-create-keystone-endpoint.yml.j2
        vars:
          service_name: cinder
          service_type: volume
          interface: internal
          service_auth: openstack_cinder_auth
          description: Openstack Block Storage
          endpoint: cinder_internal_endpoint
          postfix: v2
      - name: cinder-create-keystone-endpoint-adminv2
        template: services/common/common-create-keystone-endpoint.yml.j2
        vars:
          service_name: cinder
          service_type: volume
          interface: admin
          service_auth: openstack_cinder_auth
          description: Openstack Block Storage
          endpoint: cinder_admin_endpoint
          postfix: v2
      - name: cinder-create-keystone-user
        template: services/common/common-create-keystone-user.yml.j2
        vars:
services/ceph/ceph-admin-pod.yml.j2 (new file, 56 lines)
@@ -0,0 +1,56 @@
{%- set resourceName = kolla_kubernetes.cli.args.resource_name %}
{%- import "services/common/common-lib.yml.j2" as lib with context %}
apiVersion: v1
kind: Pod
metadata:
  name: ceph-admin
  namespace: {{ kolla_kubernetes_namespace }}
spec:
  hostNetwork: True
  hostPID: True
  nodeSelector:
{%- set selector = kolla_kubernetes_hostlabel_ceph_admin |
       default(kolla_kubernetes_hostlabel_storage |
       default(kolla_kubernetes_hostlabel_controller)
       )
%}
    {{ selector.key }}: {{ selector.value }}
  restartPolicy: Never
  containers:
    - image: "{{ ceph_mon_image_full }}"
      name: main
      command:
        - /bin/bash
        - -xec
        - |
          modprobe rbd;
          while true; do sleep 1000; done
      securityContext:
        privileged: true
      volumeMounts:
{{ lib.common_volume_mounts(indent=8) }}
        - mountPath: /etc/ceph/ceph.conf
          name: ceph-conf
          readOnly: true
          subPath: ceph.conf
        - mountPath: /etc/ceph/ceph.client.admin.keyring
          subPath: data
          name: ceph-client-admin-keyring
        - mountPath: /dev
          name: host-dev
        - mountPath: /lib/modules
          name: lib-modules
  volumes:
{{ lib.common_volumes(indent=4) }}
    - name: ceph-conf
      configMap:
        name: ceph-conf
    - name: host-dev
      hostPath:
        path: /dev
    - name: ceph-client-admin-keyring
      secret:
        secretName: ceph-client-admin-keyring
    - name: lib-modules
      hostPath:
        path: /lib/modules
@@ -1,10 +1,11 @@
{%- set podTypeBootstrap = "yes" %}
{%- set resourceName = kolla_kubernetes.cli.args.resource_name %}
{%- set index = kolla_kubernetes.template.vars.index %}
{%- import "services/common/common-lib.yml.j2" as lib with context %}
apiVersion: v1
kind: Pod
metadata:
  name: ceph-bootstrap-osd
  name: ceph-bootstrap-osd{{ index }}
  namespace: {{ kolla_kubernetes_namespace }}
  annotations:
    #FIXME Once out of alpha, this should be converted to yaml.
@@ -61,17 +62,17 @@ spec:
        - name: USE_EXTERNAL_JOURNAL
          value: "True"
        - name: JOURNAL_DEV
          value: "/dev/loop0"
          value: "/dev/loop{{ index }}"
        - name: JOURNAL_PARTITION_NUM
          value: "1"
        - name: JOURNAL_PARTITION
          value: "/dev/loop0p1"
          value: "/dev/loop{{ index }}p1"
        - name: OSD_DEV
          value: "/dev/loop0"
          value: "/dev/loop{{ index }}"
        - name: OSD_PARTITION_NUM
          value: "2"
        - name: OSD_PARTITION
          value: "/dev/loop0p2"
          value: "/dev/loop{{ index }}p2"
        - name: OSD_INITIAL_WEIGHT
          value: "1"
        - name: OSD_FILESYSTEM
@@ -1,9 +1,10 @@
{%- set resourceName = kolla_kubernetes.cli.args.resource_name %}
{%- set index = kolla_kubernetes.template.vars.index %}
{%- import "services/common/common-lib.yml.j2" as lib with context %}
apiVersion: v1
kind: Pod
metadata:
  name: ceph-osd
  name: ceph-osd{{ index }}
  namespace: {{ kolla_kubernetes_namespace }}
  annotations:
    #FIXME Once out of alpha, this should be converted to yaml.
@@ -55,7 +56,7 @@ spec:
        - /bin/bash
        - -ec
        - |
          mount {{ ceph_osd_data_dev }} /var/lib/ceph/osd/ceph-0
          mount {{ ceph_osd_data_dev[index | int] }} /var/lib/ceph/osd/ceph-{{ index }}
          kolla_start
      securityContext:
        privileged: true
@@ -63,9 +64,9 @@ spec:
        - name: KOLLA_CONFIG_STRATEGY
          value: "{{ config_strategy }}"
        - name: OSD_ID
          value: "0"
          value: "{{ index }}"
        - name: JOURNAL_PARTITION
          value: {{ ceph_osd_journal_dev }}
          value: {{ ceph_osd_journal_dev[index | int] }}
        - name: HOSTNAME
          value: {{ storage_ceph.initial_mon }}
      volumeMounts:
services/ceph/ceph-rbd-pod.yml.j2 (new file, 82 lines)
@@ -0,0 +1,82 @@
{%- set resourceName = kolla_kubernetes.cli.args.resource_name %}
{%- import "services/common/common-lib.yml.j2" as lib with context %}
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: ceph-rbd
  labels:
    component: ceph
    system: rbd
  namespace: {{ kolla_kubernetes_namespace }}
spec:
  template:
    metadata:
      labels:
        component: ceph
        system: rbd
    spec:
      hostNetwork: True
      hostPID: True
      nodeSelector:
{%- set selector = kolla_kubernetes_hostlabel_ceph_rbd |
       default(kolla_kubernetes_hostlabel_controller)
%}
        {{ selector.key }}: {{ selector.value }}
      containers:
        - image: "{{ ceph_mon_image_full }}"
          name: main
          securityContext:
            privileged: true
          command:
            - /bin/bash
            - -xec
            - |
              modprobe rbd;
              if [ -x /host/rbd ]; then
                grep label=io.kubernetes.pod.namespace /host/rbd > /dev/null && rm -f /host/rbd
              fi
              if [ ! -x /host/rbd ]; then
                echo IyEvYmluL2Jhc2gKCg== | base64 -d > /host/rbd;
                echo 'ID=$(docker ps -q -f label=io.kubernetes.pod.namespace='$POD_NAMESPACE' -f label=io.kubernetes.pod.name='$POD_NAME' -f label=io.kubernetes.container.name=main);' >> /host/rbd;
                echo 'docker exec --privileged -u 0 -i $ID /usr/bin/rbd "$@"' >> /host/rbd;
                chmod +x /host/rbd;
              fi;
              while true; do sleep 1000; done
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
{{ lib.common_volume_mounts(indent=12) }}
            - mountPath: /host/
              name: host-usr-bin
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /etc
              name: host-etc
            - mountPath: /lib/modules
              name: lib-modules
      volumes:
{{ lib.common_volumes(indent=8) }}
        - name: host-usr-bin
          hostPath:
            path: /usr/bin
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-etc
          hostPath:
            path: /etc
        - name: lib-modules
          hostPath:
            path: /lib/modules
@@ -6,11 +6,12 @@
{%- set serviceAuth = kolla_kubernetes.template.vars.service_auth %}
{%- set description = kolla_kubernetes.template.vars.description %}
{%- set endpoint = kolla_kubernetes.template.vars.endpoint %}
{%- set postfix = kolla_kubernetes.template.vars.postfix | default("") %}
{%- import "services/common/common-lib.yml.j2" as lib with context %}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ serviceName }}-create-keystone-endpoint-{{ interface }}
  name: {{ serviceName }}-create-keystone-endpoint-{{ interface }}{{ postfix }}
  namespace: {{ kolla_kubernetes_namespace }}
spec:
  parallelism: 1
@@ -26,8 +27,8 @@ spec:
          command: ["sh", "-xec"]
          args:
            - /usr/bin/ansible localhost -vvvv -m kolla_keystone_service
              -a "service_name={{ serviceName }}
              service_type={{ serviceType }}
              -a "service_name={{ serviceName }}{{ postfix }}
              service_type={{ serviceType }}{{ postfix }}
              description='{{ description }}'
              endpoint_region={{ openstack_region_name }}
              url='{{ global[endpoint] }}'
@@ -18,7 +18,7 @@ spec:
{%- elif storage_provider == "gce" %}
  gcePersistentDisk:
    pdName: {{ resourceName }}
    fsType: ext4
    fsType: {{ storage_provider_fstype }}

{%- elif storage_provider == "ceph" %}
  rbd:
@@ -32,11 +32,11 @@ spec:
{%- if storage_ceph.keyring is defined and storage_ceph.keyring|length>0 %}
    keyring: {{ storage_ceph.keyring }}
{%- endif %}
{%- if storage_ceph.secretName is defined and storage_ceph.secretName|length>0 and storage_ceph.key is defined and storage_ceph.key|length>0 %}
{%- if storage_ceph.secretName is defined and storage_ceph.secretName|length>0 %}
    secretRef:
      name: {{ storage_ceph.secretName }}
{%- endif %}
    fsType: ext4
    fsType: {{ storage_provider_fstype }}
    readOnly: false
{%- endif %}

@@ -22,7 +22,7 @@ spec:
            "image": "{{ kolla_toolbox_image_full }}",
            "command": [
                "sh",
                "-ec",
                "-xec",
                "cp -a /config/..data/* /nova/;
                 IP=$(ip addr show dev {{ tunnel_interface }} | grep -G ''inet '' | awk ''{print $2}'' | sed ''s@/.*@@'');
                 mkdir -p /var/log/kolla/nova-init;
@@ -22,7 +22,7 @@ spec:
            "image": "{{ nova_libvirt_image_full }}",
            "command": [
                "sh",
                "-c",
                "-xec",
                "cp -a /config/..data/* /nova/;
{%- if enable_libvirt_tcp == "yes" %}
                 sed -i ''s|^listen_addr.*=.*|listen_addr=\"127.0.0.1\"|g'' /nova/libvirtd.conf;
@@ -72,6 +72,12 @@ spec:
      containers:
        - name: main
          image: "{{ nova_libvirt_image_full }}"
          command:
            - /bin/bash
            - -c
            - |
              cp -a {{ container_config_directory }}/secrets /etc/libvirt;
              kolla_start
          securityContext:
            privileged: true
          volumeMounts:
@@ -1,6 +1,7 @@
api_interface_address: 0.0.0.0
orchestration_engine: KUBERNETES
api_interface: "br-ex"
tunnel_interface: "docker0"
memcached_servers: "memcached"
keystone_admin_url: "http://keystone-admin:35357/v3"
keystone_internal_url: "http://keystone-public:5000/v3"
@@ -6,11 +6,14 @@ rabbitmq_management_external: "yes"
kolla_kubernetes_external_bridge_ip: "172.18.0.1"
kolla_kubernetes_external_subnet: "24"

storage_provider: "ceph"
storage_provider_fstype: "xfs"

storage_interface: "docker0"
storage_ceph:
  monitors:
    - 172.17.0.1
  ssh_user: root
  secretName: ceph-secret
  key: EXAMPLEEXAMPLEEXAMPLEEXAMPLEEXAMPLEEXAMPLE=
  user: kolla
  pool: kollavolumes
  secretName: ceph-kolla
  initial_mon: minikube
@@ -36,21 +36,81 @@ function wait_for_pods {
    set -x
}

function wait_for_ceph_bootstrap {
    set +x
    end=$(date +%s)
    end=$((end + 120))
    while true; do
        kubectl get pods --namespace=$1 | grep ceph-bootstrap-osd && \
            PENDING=True || PENDING=False
        [ $PENDING == "False" ] && break
        sleep 1
        now=$(date +%s)
        [ $now -gt $end ] && echo containers failed to start. && \
            kubectl get pods --namespace $1 && trap_error
    done
}

function wait_for_vm {
    set +x
    count=0
    while true; do
        val=$(openstack server show $1 -f value -c OS-EXT-STS:vm_state)
        [ $val == "active" ] && break
        [ $val == "error" ] && openstack server show $1 && trap_error
        sleep 1;
        count=$((count+1))
        [ $count -gt 30 ] && trap_error
    done
    set -x
}

function wait_for_vm_ssh {
    set +ex
    count=0
    while true; do
        sshpass -p 'cubswin:)' ssh -o UserKnownHostsFile=/dev/null -o \
            StrictHostKeyChecking=no cirros@$1 echo > /dev/null
        [ $? -eq 0 ] && break
        sleep 1;
        count=$((count+1))
        [ $count -gt 30 ] && echo failed to ssh. && trap_error
    done
    set -ex
}

function scp_to_vm {
    sshpass -p 'cubswin:)' scp -o UserKnownHostsFile=/dev/null -o \
        StrictHostKeyChecking=no "$2" cirros@$1:"$3"
}

function scp_from_vm {
    sshpass -p 'cubswin:)' scp -o UserKnownHostsFile=/dev/null -o \
        StrictHostKeyChecking=no cirros@$1:"$2" "$3"
}

function ssh_to_vm {
    sshpass -p 'cubswin:)' ssh -o UserKnownHostsFile=/dev/null -o \
        StrictHostKeyChecking=no cirros@$1 "$2"
}

function wait_for_cinder {
    count=0
    while true; do
        st=$(openstack volume show $1 -f value -c status)
        [ $st != "$2" ] && break
        sleep 1
        count=$((count+1))
        [ $count -gt 30 ] && echo Cinder volume failed. && trap_error
    done
}

function trap_error {
    set +xe
    mkdir -p $WORKSPACE/logs/pods
    mkdir -p $WORKSPACE/logs/svc
    mkdir -p $WORKSPACE/logs/ceph
    mkdir -p $WORKSPACE/logs/openstack
    sudo cp /var/log/messages $WORKSPACE/logs
    sudo cp /var/log/syslog $WORKSPACE/logs
    sudo cp -a /etc/kubernetes $WORKSPACE/logs
@@ -67,6 +127,17 @@ function trap_error {
        '.items[].metadata | .namespace + " " + .name' | while read line; do
        NAMESPACE=$(echo $line | awk '{print $1}')
        NAME=$(echo $line | awk '{print $2}')
        echo $NAME | grep libvirt > /dev/null && \
            kubectl exec $NAME -c main --namespace $NAMESPACE \
                -- /bin/bash -c "virsh secret-list" > \
                $WORKSPACE/logs/virsh-secret-list.txt
        echo $NAME | grep libvirt > /dev/null && \
            kubectl exec $NAME -c main --namespace $NAMESPACE \
                -- /bin/bash -c "more /var/log/libvirt/qemu/* | cat" > \
                $WORKSPACE/logs/libvirt-vm-logs.txt
        kubectl exec $NAME -c main --namespace $NAMESPACE \
            -- /bin/bash -c "cat /var/log/kolla/*/*.log" > \
            $WORKSPACE/logs/openstack/$NAMESPACE-$NAME.txt
        kubectl describe pod $NAME --namespace $NAMESPACE > \
            $WORKSPACE/logs/pods/$NAMESPACE-$NAME.txt
        kubectl get pod $NAME --namespace $NAMESPACE -o json | jq -r \
@@ -92,6 +163,32 @@ function trap_error {
            $WORKSPACE/logs/ovs-init.txt
    done
    openstack catalog list > $WORKSPACE/logs/openstack-catalog.txt
    str="timeout 6s ceph -s"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str"
    sudo journalctl -u kubelet > $WORKSPACE/logs/kubelet.txt
    str="timeout 6s ceph pg 1.1 query"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str" \
        > $WORKSPACE/logs/ceph/pg1.1.txt
    str="timeout 6s ceph osd tree"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str" \
        > $WORKSPACE/logs/ceph/osdtree.txt
    str="timeout 6s ceph health"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str"
    str="cat /var/log/kolla/ceph/*.log"
    kubectl exec ceph-osd0 -c main --namespace=kolla -- /bin/bash -c "$str" \
        > $WORKSPACE/logs/ceph/osd.txt
    str="timeout 6s ceph pg dump"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str" \
        > $WORKSPACE/logs/ceph/pgdump.txt
    str="ceph osd crush tree"
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str" \
        > $WORKSPACE/logs/ceph/crushtree.txt
    df -h > $WORKSPACE/logs/df.txt
    dmesg > $WORKSPACE/logs/dmesg
    kubectl get secret ceph-client-nova-keyring --namespace=kolla -o yaml
    kubectl get secret nova-libvirt-bin --namespace=kolla -o yaml
    openstack volume list > $WORKSPACE/logs/volumes.txt
    cp -a /etc/kolla $WORKSPACE/logs/
    exit -1
}

@@ -129,6 +226,8 @@ sed -i "s/^\(kolla_external_vip_address:\).*/\1 '$IP'/" \
sed -i "s/^\(kolla_kubernetes_external_vip:\).*/\1 '$IP'/" \
    etc/kolla-kubernetes/kolla-kubernetes.yml

echo "kolla_base_distro: $2" >> kolla/etc/kolla/globals.yml

if [ -f /etc/redhat-release ]; then
    sudo yum install -y crudini jq
else
@@ -150,8 +249,14 @@ pip install -r requirements.txt
pip install .

crudini --set /etc/kolla/nova-compute/nova.conf libvirt virt_type qemu
crudini --set /etc/kolla/nova-compute/nova.conf libvirt rbd_user nova
UUID=$(awk '{if($1 == "rbd_secret_uuid:"){print $2}}' /etc/kolla/passwords.yml)
crudini --set /etc/kolla/nova-compute/nova.conf libvirt rbd_secret_uuid $UUID

sed -i 's/log_outputs = "3:/log_outputs = "1:/' /etc/kolla/nova-libvirt/libvirtd.conf

sed -i \
    '/\[global\]/a osd pool default size = 1\nosd pool default min size = 1\n'\
    '/\[global\]/a osd pool default size = 1\nosd pool default min size = 1\nosd crush chooseleaf type = 0\ndebug default = 5\n'\
    /etc/kolla/ceph*/ceph.conf

./tools/fix-mitaka-config.py
@@ -169,7 +274,7 @@ repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOEF
yum install -y docker kubelet kubeadm kubectl kubernetes-cni
yum install -y docker kubelet kubeadm kubectl kubernetes-cni sshpass
systemctl start kubelet
EOF
else
@@ -178,17 +283,25 @@ apt-get install -y apt-transport-https
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y docker.io kubelet kubeadm kubectl kubernetes-cni
apt-get install -y docker.io kubelet kubeadm kubectl kubernetes-cni sshpass
EOF
fi
cat >> /tmp/setup.$$ <<"EOF"
mkdir -p /data/kolla
dd if=/dev/zero of=/data/kolla/ceph-osd0.img bs=1 count=0 seek=3G
df -h
dd if=/dev/zero of=/data/kolla/ceph-osd0.img bs=5M count=1024
dd if=/dev/zero of=/data/kolla/ceph-osd1.img bs=5M count=1024
LOOP=$(losetup -f)
losetup $LOOP /data/kolla/ceph-osd0.img
parted $LOOP mklabel gpt
parted $LOOP mkpart 1 0% 512m
parted $LOOP mkpart 2 513m 100%
dd if=/dev/zero of=/data/kolla/ceph-osd1.img bs=5M count=1024
LOOP=$(losetup -f)
losetup $LOOP /data/kolla/ceph-osd1.img
parted $LOOP mklabel gpt
parted $LOOP mkpart 1 0% 512m
parted $LOOP mkpart 2 513m 100%
partprobe
systemctl start docker
kubeadm init --service-cidr 172.16.128.0/24
@@ -242,6 +355,8 @@ wait_for_pods kube-system

kubectl describe node $NODE

kollakube tmpl pv mariadb

TOOLBOX=$(kollakube tmpl bootstrap neutron-create-db -o json | jq -r '.spec.template.spec.containers[0].image')
sudo docker pull $TOOLBOX > /dev/null
timeout 240s tools/setup-resolv-conf.sh
@@ -270,52 +385,98 @@ kollakube res create pod ceph-mon

wait_for_pods kolla

kollakube res create pod ceph-bootstrap-osd
kollakube res create pod ceph-bootstrap-osd0
pull_containers kolla

wait_for_pods kolla
wait_for_ceph_bootstrap kolla

kollakube res create pod ceph-bootstrap-osd1

mkdir -p $WORKSPACE/logs/

pull_containers kolla
wait_for_pods kolla
wait_for_ceph_bootstrap kolla

kollakube res delete pod ceph-bootstrap-osd
kollakube res create pod ceph-osd
kollakube res delete pod ceph-bootstrap-osd0
kollakube res delete pod ceph-bootstrap-osd1
kollakube res create pod ceph-osd0
kollakube res create pod ceph-osd1

wait_for_pods kolla

for x in images volumes vms; do
    kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash \
        -c "ceph osd pool create $x 64"
kubectl exec ceph-osd0 -c main --namespace=kolla -- /bin/bash -c \
    "cat /etc/ceph/ceph.conf" > /tmp/$$
kubectl create configmap ceph-conf --namespace=kolla \
    --from-file=ceph.conf=/tmp/$$
kubectl exec ceph-osd0 -c main --namespace=kolla -- /bin/bash -c \
    "cat /etc/ceph/ceph.client.admin.keyring" > /tmp/$$
rm -f /tmp/$$
kollakube res create pod ceph-admin ceph-rbd

wait_for_pods kolla

echo rbd script:
cat /usr/bin/rbd

str="ceph -w"
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str" \
    > $WORKSPACE/logs/ceph.log &

for x in kollavolumes images volumes vms; do
    kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash \
        -c "ceph osd pool create $x 64; ceph osd pool set $x size 1; ceph osd pool set $x min_size 1"
done
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash \
    -c "ceph osd pool delete rbd rbd --yes-i-really-really-mean-it"
str="ceph auth get-or-create client.glance mon 'allow r' osd 'allow"
str="$str class-read object_prefix rbd_children, allow rwx pool=images'"
kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c \
    "$str" > /tmp/$$
kubectl create secret generic ceph-client-glance-keyring --namespace=kolla\
    --from-file=ceph.client.glance.keyring=/tmp/$$
str="ceph auth get-or-create client.cinder mon 'allow r' osd 'allow"
str="$str class-read object_prefix rbd_children, allow rwx pool=volumes'"
kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c \
    "$str" > /tmp/$$
kubectl create secret generic ceph-client-cinder-keyring --namespace=kolla\
    --from-file=ceph.client.cinder.keyring=/tmp/$$
str="ceph auth get-or-create client.nova mon 'allow r' osd 'allow "
str="$str class-read object_prefix rbd_children, allow rwx pool=volumes, "
str="$str allow rwx pool=vms, allow rwx pool=images'"
kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c \
    "$str" > /tmp/$$
kubectl create secret generic ceph-client-nova-keyring --namespace=kolla \
    --from-file=ceph.client.nova.keyring=/tmp/$$
kubectl create secret generic nova-libvirt-bin --namespace=kolla \
    --from-file=data=<(awk '{if($1 == "key"){print $3}}' /tmp/$$ |
    tr -d '\n')
kubectl exec ceph-osd -c main --namespace=kolla -- /bin/bash -c \
    "cat /etc/ceph/ceph.conf" > /tmp/$$
kubectl create configmap ceph-conf --namespace=kolla \
    --from-file=ceph.conf=/tmp/$$
str="ceph auth get-or-create client.kolla mon 'allow r' osd 'allow"
str="$str class-read object_prefix rbd_children, allow rwx pool=kollavolumes'"
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c \
    "$str" | awk '{if($1 == "key"){print $3}}' > /tmp/$$
kubectl create secret generic ceph-kolla --namespace=kolla \
    --from-file=key=/tmp/$$
#FIXME may need different flags for testing jewel
str="cat /etc/ceph/ceph.conf"
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str"

str="timeout 240s rbd create kollavolumes/mariadb --size 1024"
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str"
str="timeout 60s rbd create kollavolumes/rabbitmq --size 1024"
kubectl exec ceph-admin -c main --namespace=kolla -- /bin/bash -c "$str"

for volume in mariadb rabbitmq; do
    str='DEV=$(rbd map --pool kollavolumes '$volume'); mkfs.xfs $DEV;'
    str="$str rbd unmap "'$DEV;'
    timeout 60s kubectl exec ceph-admin -c main --namespace=kolla -- \
        /bin/bash -c "$str"
done

rm -f /tmp/$$
kollakube res create secret nova-libvirt

for x in mariadb rabbitmq glance; do
for x in mariadb rabbitmq; do
    kollakube res create pv $x
    kollakube res create pvc $x
done
@@ -353,6 +514,7 @@ kollakube res create bootstrap nova-create-keystone-user \
    nova-create-keystone-endpoint-public \
    glance-create-keystone-endpoint-public \
    cinder-create-keystone-endpoint-public \
    cinder-create-keystone-endpoint-publicv2 \
    neutron-create-keystone-endpoint-public

wait_for_pods kolla
@@ -363,6 +525,7 @@ kollakube res delete bootstrap nova-create-keystone-user \
    nova-create-keystone-endpoint-public \
    glance-create-keystone-endpoint-public \
    cinder-create-keystone-endpoint-public \
    cinder-create-keystone-endpoint-publicv2 \
    neutron-create-keystone-endpoint-public

kollakube res create bootstrap glance-create-db glance-manage-db \
@@ -371,10 +534,12 @@ kollakube res create bootstrap glance-create-db glance-manage-db \
    nova-create-keystone-endpoint-internal \
    glance-create-keystone-endpoint-internal \
    cinder-create-keystone-endpoint-internal \
    cinder-create-keystone-endpoint-internalv2 \
    neutron-create-keystone-endpoint-internal \
    nova-create-keystone-endpoint-admin \
    glance-create-keystone-endpoint-admin \
    cinder-create-keystone-endpoint-admin \
    cinder-create-keystone-endpoint-adminv2 \
    neutron-create-keystone-endpoint-admin

pull_containers kolla
@@ -421,10 +586,12 @@ kollakube res delete bootstrap glance-create-db glance-manage-db \
    nova-create-keystone-endpoint-internal \
    glance-create-keystone-endpoint-internal \
    cinder-create-keystone-endpoint-internal \
    cinder-create-keystone-endpoint-internalv2 \
    neutron-create-keystone-endpoint-internal \
    nova-create-keystone-endpoint-admin \
    glance-create-keystone-endpoint-admin \
    cinder-create-keystone-endpoint-admin \
    cinder-create-keystone-endpoint-adminv2 \
    neutron-create-keystone-endpoint-admin

kollakube res create pod nova-api nova-conductor nova-scheduler glance-api \
@@ -492,15 +659,59 @@ wait_for_vm test
wait_for_vm test2

openstack volume create --size 1 test

wait_for_cinder test creating

openstack server add volume test test

openstack help floating ip create
FIP=$(openstack floating ip create external -f value -c floating_ip_address)
FIP2=$(openstack floating ip create external -f value -c floating_ip_address)

FIP=$(openstack floating ip create external -f value -c ip)
FIP2=$(openstack floating ip create external -f value -c ip)

openstack ip floating add $FIP test
openstack ip floating add $FIP2 test2
openstack server add floating ip test $FIP
openstack server add floating ip test2 $FIP2

openstack server list

wait_for_vm_ssh $FIP

sshpass -p 'cubswin:)' ssh -o UserKnownHostsFile=/dev/null -o \
    StrictHostKeyChecking=no cirros@$FIP curl 169.254.169.254

sshpass -p 'cubswin:)' ssh -o UserKnownHostsFile=/dev/null -o \
    StrictHostKeyChecking=no cirros@$FIP ping -c 4 $FIP2

openstack volume show test -f value -c status
TESTSTR=$(uuidgen)
cat > /tmp/$$ <<EOF
#!/bin/sh -xe
mkdir /tmp/mnt
sudo /sbin/mkfs.vfat /dev/vdb
sudo mount /dev/vdb /tmp/mnt
sudo /bin/sh -c 'echo $TESTSTR > /tmp/mnt/test.txt'
sudo umount /tmp/mnt
EOF
chmod +x /tmp/$$

scp_to_vm $FIP /tmp/$$ /tmp/script
ssh_to_vm $FIP "/tmp/script"

openstack server remove volume test test
wait_for_cinder test in-use
openstack server add volume test2 test
wait_for_cinder test available

cat > /tmp/$$ <<EOF
#!/bin/sh -xe
mkdir /tmp/mnt
sudo mount /dev/vdb /tmp/mnt
sudo cat /tmp/mnt/test.txt
sudo cp /tmp/mnt/test.txt /tmp
sudo chown cirros /tmp/test.txt
EOF
chmod +x /tmp/$$

scp_to_vm $FIP2 /tmp/$$ /tmp/script
ssh_to_vm $FIP2 "/tmp/script"
scp_from_vm $FIP2 /tmp/test.txt /tmp/$$.2

diff -u <(echo $TESTSTR) /tmp/$$.2