Merge "Fix cephfs access after upgrading"
commit f12750e297
@@ -31,15 +31,12 @@ set -x
 # Check if the filesystem for the system RWX provisioner is present
 ceph fs ls | grep ${FS_NAME}
 if [ $? -ne 0 ]; then
-    # Attempt to create the pool if not present, this should be present
-    ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${DATA_POOL_NAME}
-    if [ $? -eq 22 ]; then
-        # We need to rebuild the fs since we have hit:
-        # Error EINVAL: pool 'kube-cephfs-metadata' already contains some
-        # objects. Use an empty pool instead.
-        ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${DATA_POOL_NAME} --force
-        ceph fs reset ${FS_NAME} --yes-i-really-mean-it
-    fi
+    # If we have existing metadata/data pools, use them
+    ceph fs new ${FS_NAME} ${METADATA_POOL_NAME} ${DATA_POOL_NAME} --force
+    # Reset the filesystem and journal
+    ceph fs reset ${FS_NAME} --yes-i-really-mean-it
+    cephfs-journal-tool --rank=${FS_NAME}:0 event recover_dentries summary
+    cephfs-journal-tool --rank=${FS_NAME}:0 journal reset
 fi

 # Start the Ceph MDS
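Review note: the new path drops the old probe for exit status 22 (Error EINVAL: pool already contains some objects). Since the metadata/data pools always survive the upgrade, the filesystem is re-registered on them unconditionally with --force, reset, and its MDS journal salvaged and cleared. Read standalone, the flow is roughly the sketch below; the default values are assumptions for illustration (only kube-cephfs-metadata appears in the old comments), as the real script defines FS_NAME, METADATA_POOL_NAME, and DATA_POOL_NAME elsewhere:

    #!/bin/bash
    # Sketch only: the variable defaults are assumed, not taken from the script.
    FS_NAME=${FS_NAME:-kube-cephfs}
    METADATA_POOL_NAME=${METADATA_POOL_NAME:-kube-cephfs-metadata}
    DATA_POOL_NAME=${DATA_POOL_NAME:-kube-cephfs-data}

    if ! ceph fs ls | grep -q "${FS_NAME}"; then
        # Re-register the filesystem on the surviving pools; --force is needed
        # because the metadata pool already contains objects.
        ceph fs new "${FS_NAME}" "${METADATA_POOL_NAME}" "${DATA_POOL_NAME}" --force
        ceph fs reset "${FS_NAME}" --yes-i-really-mean-it
        # Salvage directory entries from the journal, then reset it so the
        # MDS starts from a clean journal.
        cephfs-journal-tool --rank="${FS_NAME}":0 event recover_dentries summary
        cephfs-journal-tool --rank="${FS_NAME}":0 journal reset
    fi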
@@ -281,6 +281,9 @@
   script: recover_cephfs.sh
   register: cephfs_recovery_out

+- name: Create ceph.client.guest.keyring to allow ceph mount again
+  command: touch /etc/ceph/ceph.client.guest.keyring
+
 - debug: var=cephfs_recovery_out.stdout_lines

 - name: Restart ceph one more time to pick latest changes
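Review note: the mount.ceph helper searches the default keyring locations, including /etc/ceph/ceph.client.<name>.keyring, so the added task recreates the file the upgrade left missing; per the task name, its presence is what unblocks mounting again. A shell sketch of the task plus a hypothetical smoke test (monitor address and mount point are assumptions, not from the playbook):

    # Equivalent of the added Ansible task:
    touch /etc/ceph/ceph.client.guest.keyring

    # Hypothetical check that the guest client can mount CephFS again;
    # substitute a real monitor address and mount point.
    mkdir -p /mnt/cephfs-check
    mount -t ceph controller:6789:/ /mnt/cephfs-check -o name=guest
    umount /mnt/cephfs-check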