Edit nccli string to utilscli for Ceph Utility Container

Updated the nccli string to utilscli to avoid AT&T-specific Network Cloud terminology.

Change-Id: I8dae02559a422dab0bdb8007daaa4f86a67f087e
Author: Kavva, Jagan Mohan (jk330k), 2019-08-07 17:00:36 -05:00; committed by Luna Das
parent 58fcd9a861
commit c9b9c5aaeb
10 changed files with 57 additions and 57 deletions

README

@@ -79,9 +79,9 @@ Usage
Get into the utility pod using kubectl exec. To perform any operation on the Ceph cluster, use the examples below.
example:
-nccli ceph osd tree
-nccli rbd ls
-nccli rados lspools
+utilscli ceph osd tree
+utilscli rbd ls
+utilscli rados lspools
TODO
----
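For orientation only (a sketch, not part of this change): the README assumes these commands run inside the utility pod. One plausible way to reach it with kubectl exec, where the namespace and pod name are assumptions rather than values taken from this chart, is:

```
# Hypothetical sketch, not part of this change: find the ceph-utility pod and run
# one of the utilscli commands above inside it. Namespace and names are assumed.
kubectl -n utility get pods | grep ceph-utility
kubectl -n utility exec -it <ceph-utility-pod-name> -- utilscli ceph osd tree
```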


@@ -18,7 +18,7 @@ limitations under the License.
set -ex
function check_osd_status () {
-OSD_ID=$(nccli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.status == "down")|.id')
+OSD_ID=$(utilscli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.status == "down")|.id')
if [ "${OSD_ID}" != '' ];then
for i in $OSD_ID; do
echo "OSD id $i is in Down Status"
@@ -35,7 +35,7 @@ function osd_remove () {
read -p "Enter 'yes' to purge OSD=$id and 'no' to skip=" YN
if [[ $YN == "y" || $YN == "Y" || $YN == "yes" || $YN == "YES" ]]; then
echo "Purging OSD=$id"
-nccli ceph osd purge $id --yes-i-really-mean-it
+utilscli ceph osd purge $id --yes-i-really-mean-it
sleep 3
elif [[ $YN == "n" || $YN == "N" || $YN == "no" || $YN == "NO" ]]; then
echo "Not purging OSD=$id"
@@ -47,10 +47,10 @@ function osd_remove () {
function osd_remove_by_id () {
OSDID=$1
-OSD_STATUS=$(nccli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.id == '$OSDID')|.status')
+OSD_STATUS=$(utilscli ceph osd tree -f json-pretty | jq '.nodes[]|select(.type=="osd")|select(.id == '$OSDID')|.status')
if [ "$OSD_STATUS" == '"down"' ]; then
echo "OSD id $OSDID is in Down Status, So purging it"
-nccli ceph osd purge $OSDID --yes-i-really-mean-it
+utilscli ceph osd purge $OSDID --yes-i-really-mean-it
elif [[ -z "$OSD_STATUS" ]]; then
echo "OSD id $OSDID is not found, Please enter correct OSD id"
exit
@@ -61,18 +61,18 @@ function osd_remove_by_id () {
}
function reweight_osds () {
-for OSD_ID in $(nccli ceph osd df | awk '$3 == "0" {print $1}'); do
-OSD_WEIGHT=$(nccli ceph osd df --format json-pretty| grep -A7 "\bosd.${OSD_ID}\b" | awk '/"kb"/{ gsub(",",""); d= $2/1073741824 ; r = sprintf("%.2f", d); print r }');
-nccli ceph osd crush reweight osd.${OSD_ID} ${OSD_WEIGHT};
+for OSD_ID in $(utilscli ceph osd df | awk '$3 == "0" {print $1}'); do
+OSD_WEIGHT=$(utilscli ceph osd df --format json-pretty| grep -A7 "\bosd.${OSD_ID}\b" | awk '/"kb"/{ gsub(",",""); d= $2/1073741824 ; r = sprintf("%.2f", d); print r }');
+utilscli ceph osd crush reweight osd.${OSD_ID} ${OSD_WEIGHT};
done
}
usage() {
set +ex
-echo "Usage: nccli osd-maintenance check_osd_status"
-echo "       nccli osd-maintenance osd_remove"
-echo "       nccli osd-maintenance osd_remove_by_id --osd-id <OSDID>"
-echo "       nccli osd-maintenance reweight_osds"
+echo "Usage: utilscli osd-maintenance check_osd_status"
+echo "       utilscli osd-maintenance osd_remove"
+echo "       utilscli osd-maintenance osd_remove_by_id --osd-id <OSDID>"
+echo "       utilscli osd-maintenance reweight_osds"
exit 1
}
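A note on the reweight logic shown above: `reweight_osds` picks OSDs whose weight column is `0` and derives a new CRUSH weight from the `kb` field of `ceph osd df --format json-pretty`, dividing by 1073741824 (2^30) to convert KiB into TiB, the unit CRUSH weights conventionally use. A hedged, worked example of just that conversion (the size is made up):

```
# Hypothetical worked example, not part of this change: converting an OSD's "kb"
# size (KiB) into a TiB-based CRUSH weight, as the awk expression above does.
kb=3906250000    # example only; roughly a 4 TB disk reported in KiB
awk -v kb="$kb" 'BEGIN { printf "%.2f\n", kb / 1073741824 }'
# prints 3.64, which the script would then apply with:
#   utilscli ceph osd crush reweight osd.<id> 3.64
```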


@@ -17,9 +17,9 @@ limitations under the License.
*/}}
usage() {
-echo "Backup Usage: nccli rbd_pv [-b <pvc name>] [-n <namespace>] [-d <backup dest> (optional, default: /backup)] [-p <ceph rbd pool> (optional, default: rbd)]"
-echo "Restore Usage: nccli rbd_pv [-r <restore_file>] [-p <ceph rbd pool> (optional, default: rbd)]"
-echo "Snapshot Usage: nccli rbd_pv [-b <pvc name>] [-n <namespace>] [-p <ceph rbd pool> (optional, default: rbd] [-s <create|rollback|remove|show> (required) ]"
+echo "Backup Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-d <backup dest> (optional, default: /backup)] [-p <ceph rbd pool> (optional, default: rbd)]"
+echo "Restore Usage: utilscli rbd_pv [-r <restore_file>] [-p <ceph rbd pool> (optional, default: rbd)]"
+echo "Snapshot Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-p <ceph rbd pool> (optional, default: rbd] [-s <create|rollback|remove|show> (required) ]"
exit 1
}
@@ -53,11 +53,11 @@ timestamp="$(date +%F_%T)"
if [[ ! -z "${restore_file}" ]]; then
if [[ -e "${restore_file}" ]]; then
rbd_image="$(echo "${restore_file}" | rev | awk -v FS='/' '{print $1}' | rev | cut -f 1 -d '.')"
-if (nccli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then
-nccli rbd mv ${rbd_pool}/${rbd_image} ${rbd_pool}/${rbd_image}.orig-${timestamp}
+if (utilscli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then
+utilscli rbd mv ${rbd_pool}/${rbd_image} ${rbd_pool}/${rbd_image}.orig-${timestamp}
echo "WARNING: Existing PVC/RBD image has been moved to ${rbd_pool}/${rbd_image}.orig-${timestamp}"
fi
-nccli rbd import ${restore_file} ${rbd_pool}/${rbd_image}
+utilscli rbd import ${restore_file} ${rbd_pool}/${rbd_image}
echo "INFO: Backup has been restored into ${rbd_pool}/${rbd_image}"
else
echo "ERROR: Missing restore file!"
@@ -69,26 +69,26 @@ elif [[ ! -z "${snapshot}" ]]; then
if [[ "x${snapshot}x" == "xcreatex" ]]; then
snap_name="${pvc_name}-${timestamp}"
-nccli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name}
+utilscli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name}
echo "INFO: Snapshot ${rbd_pool}/${rbd_image}@${snap_name} has been created for PVC ${pvc_name}"
elif [[ "x${snapshot}x" == "xrollback" ]]; then
-snap_name=$(nccli rbd snap ls ${rbd_pool}/${rbd_image})
-nccli rbd snap rollback ${rbd_pool}/${rbd_image}@${snap_name}
+snap_name=$(utilscli rbd snap ls ${rbd_pool}/${rbd_image})
+utilscli rbd snap rollback ${rbd_pool}/${rbd_image}@${snap_name}
echo "WARNING: Rolled back snapshot ${rbd_pool}/${rbd_image}@${snap_name} for ${pvc_name}"
elif [[ "x${snapshot}x" == "xremovex" ]]; then
-nccli rbd snap purge ${rbd_pool}/${rbd_image}
+utilscli rbd snap purge ${rbd_pool}/${rbd_image}
echo "Removed snapshot(s) for ${pvc_name}"
elif [[ "x${snapshot}x" == "xshowx" ]]; then
echo "INFO: This PV is mapped to the following RBD Image:"
echo "${rbd_pool}/${rbd_image}"
echo -e "\nINFO: Current open sessions to RBD Image:"
-nccli rbd status ${rbd_pool}/${rbd_image}
+utilscli rbd status ${rbd_pool}/${rbd_image}
echo -e "\nINFO: RBD Image information:"
-nccli rbd info ${rbd_pool}/${rbd_image}
+utilscli rbd info ${rbd_pool}/${rbd_image}
echo -e "\nINFO: RBD Image snapshot details:"
rbd snap ls ${rbd_pool}/${rbd_image}
echo -e "\nINFO: RBD Image size details:"
-nccli rbd du ${rbd_pool}/${rbd_image}
+utilscli rbd du ${rbd_pool}/${rbd_image}
else
echo "ERROR: Missing argument for snapshot option!"
fi
@@ -105,17 +105,17 @@ else
volume="$(kubectl -n ${nspace} get pvc ${pvc_name} --no-headers | awk '{ print $3 }')"
rbd_image="$(kubectl get pv "${volume}" -o json | jq -r '.spec.rbd.image')"
-if [[ -z "${volume}" ]] || (! nccli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then
+if [[ -z "${volume}" ]] || (! utilscli rbd info "${rbd_pool}"/"${rbd_image}" | grep -q id); then
echo "ERROR: PVC does not exist or is missing! Cannot continue with backup for ${pvc_name}"
exit 1
else
# Create current snapshot and export to a file
snap_name="${pvc_name}-${timestamp}"
backup_name="${rbd_image}.${pvc_name}-${timestamp}"
-nccli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name}
-nccli rbd export ${rbd_pool}/${rbd_image}@${snap_name} ${backup_dest}/${backup_name}
+utilscli rbd snap create ${rbd_pool}/${rbd_image}@${snap_name}
+utilscli rbd export ${rbd_pool}/${rbd_image}@${snap_name} ${backup_dest}/${backup_name}
# Remove snapshot otherwise we may see an issue cleaning up the PVC from K8s, and from Ceph.
-nccli rbd snap rm ${rbd_pool}/${rbd_image}@${snap_name}
+utilscli rbd snap rm ${rbd_pool}/${rbd_image}@${snap_name}
echo "INFO: PV ${pvc_name} saved to:"
echo "${backup_dest}/${backup_name}"
fi
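For reference (not introduced by this change): the export above writes `${backup_dest}/${backup_name}`, i.e. a file named `<rbd_image>.<pvc_name>-<timestamp>`. A sketch of checking the result, with invented names:

```
# Hypothetical sketch, not part of this change; the file name below is made up to
# match the ${backup_dest}/${backup_name} pattern the script uses.
ls -lh /backup/kubernetes-dynamic-pvc-ab1f2e8f-21a4-11e9-ab61-ca77944df03c.mysql-data-mariadb-server-0-2019-08-07_17:00:36
# That file is what a later restore would consume via: utilscli rbd_pv -r <backup file>
```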


@@ -38,8 +38,8 @@ data:
ceph-utility-rootwrap: |
{{ tuple "bin/utility/_ceph-utility-rootwrap.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
-nccli: |
-{{ tuple "bin/utility/_nccli.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+utilscli: |
+{{ tuple "bin/utility/_utilscli.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
osd-maintenance: |
{{ tuple "bin/utility/_osd-maintenance.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}


@@ -22,6 +22,6 @@ kind: ConfigMap
metadata:
name: {{ printf "%s-%s" $envAll.Release.Name "sudoers" }}
data:
-nccli-sudo: |
-{{ tuple "bin/_nccli-sudo.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
+utilscli-sudo: |
+{{ tuple "bin/_utilscli-sudo.tpl" . | include "helm-toolkit.utils.template" | indent 4 }}
{{- end }}


@@ -89,8 +89,8 @@ spec:
subPath: managekey.sh
readOnly: true
- name: ceph-utility-bin
-mountPath: /usr/local/bin/nccli
-subPath: nccli
+mountPath: /usr/local/bin/utilscli
+subPath: utilscli
readOnly: true
- name: ceph-utility-bin
mountPath: /usr/local/bin/ceph-utility-rootwrap
@@ -105,8 +105,8 @@ spec:
subPath: rbd_pv
readOnly: true
- name: ceph-utility-sudoers
-mountPath: /etc/sudoers.d/nccli-sudo
-subPath: nccli-sudo
+mountPath: /etc/sudoers.d/utilscli-sudo
+subPath: utilscli-sudo
readOnly: true
- name: ceph-etc
mountPath: /etc/ceph/ceph.conf
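For context (not part of this change): each renamed ConfigMap key is projected into the container as a single file via `subPath`, so the wrapper appears at `/usr/local/bin/utilscli` and the sudoers entry at `/etc/sudoers.d/utilscli-sudo`. A minimal check might look like the following, where the namespace and pod name are assumptions:

```
# Hypothetical check, not part of this change; namespace and pod name are examples.
kubectl -n utility exec -it <ceph-utility-pod-name> -- \
    ls -l /usr/local/bin/utilscli /etc/sudoers.d/utilscli-sudo
```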


@@ -16,28 +16,28 @@ This MOP covers Maintenance Activities related to Ceph.
To check the current status of OSDs, execute the following:
```
-nccli osd-maintenance check_osd_status
+utilscli osd-maintenance check_osd_status
```
### OSD Removal
To purge OSDs in down state, execute the following:
```
-nccli osd-maintenance osd_remove
+utilscli osd-maintenance osd_remove
```
### OSD Removal By OSD ID
To purge OSDs by OSD ID in down state, execute the following:
```
-nccli osd-maintenance remove_osd_by_id --osd-id <OSDID>
+utilscli osd-maintenance remove_osd_by_id --osd-id <OSDID>
```
### Reweight OSDs
To adjust an OSD's crush weight in the CRUSH map of a running cluster, execute the following:
```
-nccli osd-maintenance reweight_osds
+utilscli osd-maintenance reweight_osds
```
## 2. Replace failed OSD ##
@@ -46,11 +46,11 @@ In the context of a failed drive, Please follow below procedure. Following comma
Capture the failed OSD ID. Check for status `down`
-nccli ceph osd tree
+utilscli ceph osd tree
Remove the OSD from the cluster. Replace `<OSD_ID>` with the failed OSD ID captured above.
-nccli osd-maintenance osd_remove_by_id --osd-id <OSD_ID>
+utilscli osd-maintenance osd_remove_by_id --osd-id <OSD_ID>
Remove the failed drive and replace it with a new one without bringing down the node.
@@ -60,5 +60,5 @@ Once new drive is placed, delete the concern OSD pod in `error` or `CrashLoopBac
Once the pod is deleted, Kubernetes will re-spin a new pod for the OSD. Once the pod is up, the OSD is added to the Ceph cluster with a weight equal to `0`, so we need to re-weight the OSD.
-nccli osd-maintenance reweight_osds
+utilscli osd-maintenance reweight_osds
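Strung together, the failed-OSD procedure above might look like the following sketch; the OSD id, namespace, and pod name are examples only, not taken from this change:

```
# Hypothetical end-to-end sketch of the steps above; ids and names are made up.
utilscli ceph osd tree                                 # spot the OSD reported "down", e.g. osd.7
utilscli osd-maintenance osd_remove_by_id --osd-id 7   # purge the failed OSD
# ...replace the physical drive, then delete the failed OSD pod so it is re-spun:
kubectl -n ceph delete pod <failed-ceph-osd-pod-name>
utilscli osd-maintenance reweight_osds                 # the new OSD joins with weight 0; re-weight it
```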


@@ -4,27 +4,27 @@ This MOP covers Maintenance Activities related to using the rbd_pv script
to backup and recover PVCs within your kubernetes environment using Ceph.
## Usage
-Execute nccli rbd_pv without arguements to list usage options.
+Execute utilscli rbd_pv without arguements to list usage options.
```
-nccli rbd_pv
-Backup Usage: nccli rbd_pv [-b <pvc name>] [-n <namespace>] [-d <backup dest> (optional, default: /tmp/backup)] [-p <ceph rbd pool> (optional, default: rbd)]
-Restore Usage: nccli rbd_pv [-r <restore_file>] [-p <ceph rbd pool> (optional, default: rbd)]
-Snapshot Usage: nccli rbd_pv [-b <pvc name>] [-n <namespace>] [-p <ceph rbd pool> (optional, default: rbd] [-s <create|rollback|remove> (required)]
+utilscli rbd_pv
+Backup Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-d <backup dest> (optional, default: /tmp/backup)] [-p <ceph rbd pool> (optional, default: rbd)]
+Restore Usage: utilscli rbd_pv [-r <restore_file>] [-p <ceph rbd pool> (optional, default: rbd)]
+Snapshot Usage: utilscli rbd_pv [-b <pvc name>] [-n <namespace>] [-p <ceph rbd pool> (optional, default: rbd] [-s <create|rollback|remove> (required)]
```
## Backing up a PVC/PV from RBD
To backup a PV, execute the following:
```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack
```
## Restoring a PVC/PV backup
To restore a PV RBD backup image, execute the following:
```
-nccli rbd_pv -r /backup/kubernetes-dynamic-pvc-ab1f2e8f-21a4-11e9-ab61-ca77944df03c.img
+utilscli rbd_pv -r /backup/kubernetes-dynamic-pvc-ab1f2e8f-21a4-11e9-ab61-ca77944df03c.img
```
NOTE: The original PVC/PV will be renamed and not overwritten.
NOTE: Before restoring, you _must_ ensure it is not mounted!
@@ -32,13 +32,13 @@ NOTE: Before restoring, you _must_ ensure it is not mounted!
## Creating a Snapshot for a PVC/PV
```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s create
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s create
```
## Rolling back to a Snapshot for a PVC/PV
```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s rollback
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s rollback
```
NOTE: Before rolling back a snapshot, you _must_ ensure the PVC/PV volume is not mounted!!
@@ -46,7 +46,7 @@ NOTE: Before rolling back a snapshot, you _must_ ensure the PVC/PV volume is not
## Removing a Snapshot for a PVC/PV
```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s remove
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s remove
```
NOTE: This will remove all snapshots in Ceph associated to this PVC/PV!
@@ -54,5 +54,5 @@ NOTE: This will remove all snapshots in Ceph associated to this PVC/PV!
## Show Snapshot and Image details for a PVC/PV
```
-nccli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s show
+utilscli rbd_pv -b mysql-data-mariadb-server-0 -n openstack -s show
```