From acd54ba601e34dafac063f94054bb80348e73dcd Mon Sep 17 00:00:00 2001
From: "Suren A. Chilingaryan"
Date: Sun, 6 Oct 2019 04:43:13 +0200
Subject: Script to fully clean up outdated rc

---
 scripts/gluster.sh                  | 177 ------------------------------------
 scripts/hawakular.sh                |  18 ----
 scripts/kube-ops-view.sh            |  12 ---
 scripts/maintain/gluster/gluster.sh | 177 ++++++++++++++++++++++++++++++++++++
 scripts/maintain/gluster/opts.sh    |  21 +++++
 scripts/maintain/prunerc.sh         |  60 ++++++++++++
 scripts/opts.sh                     |  21 -----
 scripts/provision/hawakular.sh      |  18 ++++
 scripts/provision/kube-ops-view.sh  |  12 +++
 9 files changed, 288 insertions(+), 228 deletions(-)
 delete mode 100755 scripts/gluster.sh
 delete mode 100755 scripts/hawakular.sh
 delete mode 100755 scripts/kube-ops-view.sh
 create mode 100755 scripts/maintain/gluster/gluster.sh
 create mode 100644 scripts/maintain/gluster/opts.sh
 create mode 100644 scripts/maintain/prunerc.sh
 delete mode 100644 scripts/opts.sh
 create mode 100755 scripts/provision/hawakular.sh
 create mode 100755 scripts/provision/kube-ops-view.sh

diff --git a/scripts/gluster.sh b/scripts/gluster.sh
deleted file mode 100755
index a3ff186..0000000
--- a/scripts/gluster.sh
+++ /dev/null
@@ -1,177 +0,0 @@
-#! /bin/bash
-
-. opts.sh
-
-[ $? -ne 0 -o -z "$gpod" ] && { echo "No storage pods are running..." ; exit 1 ; }
-[ -z "$1" ] && { echo "Usage: $0 [src] [dst]" ; exit 1 ; }
-action=$1
-shift
-
-
-function info {
-    vol=$1
-
-    status=$(gluster volume info "$vol" | grep -P 'Status' | awk '{ print $2 }' | tr -d '\r\n')
-    bricks=$(gluster volume info "$vol" | grep -P 'Number of Bricks' | awk '{ print $NF }' | tr -d '\r\n')
-    avail=$(gluster volume status "$vol" detail | grep Brick | wc -l)
-    online=$(gluster volume status "$vol" detail | grep Online | grep Y | wc -l)
-
-    echo "Volume $vol: $status (Bricks: $bricks, Available: $avail, Online: $online)"
-}
-
-function heal {
-    vol=$1
-
-    distributed=0
-    gluster volume info "$vol" | grep "Type:" | grep -i "Distribute" &> /dev/null
-    [ $? -eq 0 ] && distributed=1
-
-    echo "Healing volume $vol"
-    echo "-------------------"
-    gluster volume heal "$vol" full
-    gluster volume heal "$vol" info
-
-    if [ $distributed -eq 1 ]; then
-        echo "Rebalancing distributed volume $vol"
-        gluster volume rebalance "$vol" fix-layout start
-    fi
-
-
-    gluster volume status "$vol"
-}
-
-function migrate {
-    vol=$1
-    src=$2
-    dst=$3
-
-    [ -z "$src" -o -z "$dst" ] && { echo "Source and destination servers are required" ; exit 1 ; }
-
-    src_brick=$(gluster volume info $vol | grep -P '^Brick\d+:' | awk '{ print $2 }' | grep -P "^$src" | tr -d '\r\n' )
-    dst_brick=${src_brick/$src/$dst}
-
-    [ -z "$src_brick" -o -z "$dst_brick" ] && return 0
-
-    echo "Volume $vol: migrating failed brick"
-    echo "  from $src_brick"
-    echo "  to $dst_brick"
-    echo "Press enter to continue"
-    read
-    [ $? -ne 0 ] && exit
-
-    gluster volume replace-brick $vol "$src_brick" "$dst_brick" commit force
-    heal $vol
-}
-
-
-function transport {
-    vol=$1
-    transport=${2:-tcp,rdma}
-    echo "Changing $vol to transport $transport"
-    gluster volume stop "$vol"
-    gluster volume set "$vol" config.transport "$transport"
-    gluster volume start "$vol"
-}
-
-
-
-function restart {
-    vol=$1
-
-    echo $vol
-    bricks=$(gluster volume info "$vol" | grep -P 'Number of Bricks' | awk '{ print $NF }' | tr -d '\r\n')
-    online=$(gluster volume status "$vol" detail | grep Online | grep Y | wc -l)
-
-    if [ "$bricks" -ne "$online" ]; then
-        echo "Restarting $vol ($online bricks of $bricks are/is online)"
-        gluster --mode=script volume stop "$vol"
-        gluster --mode=script volume start "$vol"
-    fi
-}
-
-function delete_failed {
-    vol=$1
-
-    bricks=$(gluster volume info "$vol" | grep -P 'Number of Bricks' | awk '{ print $NF }' | tr -d '\r\n')
-    online=$(gluster volume status "$vol" detail | grep Online | grep Y | wc -l)
-
-    if [ "$online" == "0" ]; then
-        echo "Deleting $vol ($online bricks of $bricks are/is online)"
-#        gluster --mode=script volume stop "$vol"
-        gluster --mode=script volume delete "$vol"
-    fi
-}
-
-
-function lvm_clean {
-    used_bricks=`gluster volume info | grep "/brick_" | sed -r -e 's/.*brick_(.*)\/brick/\1/'`
-
-    for ip in $(seq 1 3); do
-        echo "Node $ip"
-        echo "========"
-        lvm_bricks=`node $ip lvscan | grep brick | sed -r -e 's/.*brick_([0-9a-z]*)[^0-9a-z].*/\1/'`
-
-        diff=$(echo $used_bricks $lvm_bricks | tr -d '\r' | tr ' ' '\n' | sort | uniq -u)
-        remove=$(echo "$diff $lvm_bricks" | tr -d '\r' | tr ' ' '\n' | sort | uniq -d)
-
-        for id in $remove; do
-            echo "Removing ---------------------------------------------"
-            node $ip lvs -o name,time,size -S "'name =~ $id'"
-            echo "Removing ---------------------------------------------"
-            node $ip lvremove -y -S "'name =~ $id'"
-        done
-    done
-
-
-}
-
-function lvm_remove_today {
-    for ip in $(seq 1 3); do
-        node $ip hostname
-#lvdisplay -o name,time -S 'time since "2018-03-16"'
-    done
-}
-
-
-function heketi_cmd {
-    heketi "$@"
-}
-
-function heketi_clean {
-    heketi_vols=`heketi topology info | grep "Name: vol_" | sed -r -e 's/.*(vol_[0-9a-z]+)\s*$/\1/'`
-    gluster_vols=`gluster volume info | grep "Name: vol_" | sed -r -e 's/.*(vol_[0-9a-z]+)\s*$/\1/'`
-    echo $heketi_vols
-
-    diff=$(echo $gluster_vols $heketi_vols | tr -d '\r' | tr ' ' '\n' | sort | uniq -u)
-    remove=$(echo "$diff $gluster_vols" | tr -d '\r' | tr ' ' '\n' | sort | uniq -d)
-
-    for vol in $remove; do
-        echo "Stopping and deleting volume $vol"
-        echo "---------------------------------"
-        gluster --mode=script volume stop "$vol"
-        gluster --mode=script volume delete "$vol"
-    done
-}
-
-if [[ "$action" =~ ^heketi ]]; then
-    eval "$action" "$@"
-elif [[ "$action" =~ ^lvm ]]; then
-    eval "$action" "$@"
-elif [[ -z "$1" || "$1" =~ ^all ]]; then
-    all=0
-    [ "$1" == "all_heketi" ] && all=1
-    [[ "$1" =~ ^all ]] && shift
-
-    vols=$(gluster volume info | grep -P '^Volume Name' | awk '{ print $NF }' | tr '\r\n' ' ')
-    for vol in $vols; do
-        if [ $all -eq 0 ]; then
-            [[ "$vol" =~ [0-9] ]] && continue
-            [[ "$vol" =~ ^vol_ ]] && continue
-            [[ "$vol" =~ ^heketi ]] && continue
-        fi
-
-        eval "$action" "$vol" "$@"
-    done
-else
-    eval "$action" "$@"
-fi
diff --git a/scripts/hawakular.sh b/scripts/hawakular.sh
deleted file mode 100755
index 73e3a87..0000000
--- a/scripts/hawakular.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-case $1 in
-    "stop")
-        oc -n openshift-infra scale --replicas 0 rc/hawkular-metrics
-        oc -n openshift-infra scale --replicas 0 rc/heapster
-        oc -n openshift-infra scale --replicas 0 dc/hawkular-cassandra
-        ;;
-    "start")
-        oc -n openshift-infra scale --replicas 0 dc/hawkular-cassandra
-        sleep 1
-        oc -n openshift-infra scale --replicas 0 rc/heapster
-        sleep 1
-        oc -n openshift-infra scale --replicas 0 rc/hawkular-metrics
-        ;;
-    *)
-        echo "Usage: $0 stop/start"
-esac
diff --git a/scripts/kube-ops-view.sh b/scripts/kube-ops-view.sh
deleted file mode 100755
index ca1389e..0000000
--- a/scripts/kube-ops-view.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#! /bin/bash
-
-NS=mon
-
-
-oc -n $NS new-project ocp-ops-view
-oc -n $NS create sa kube-ops-view
-oc -n $NS adm policy add-scc-to-user anyuid -z kube-ops-view
-oc -n $NS adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:mon:kube-ops-view
-oc -n $NS apply -f https://raw.githubusercontent.com/raffaelespazzoli/kube-ops-view/ocp/deploy-openshift/kube-ops-view.yaml
-oc -n $NS expose svc kube-ops-view
-oc -n $NS get route | grep kube-ops-view | awk '{print $2}'
diff --git a/scripts/maintain/gluster/gluster.sh b/scripts/maintain/gluster/gluster.sh
new file mode 100755
index 0000000..a3ff186
--- /dev/null
+++ b/scripts/maintain/gluster/gluster.sh
@@ -0,0 +1,177 @@
+#! /bin/bash
+
+. opts.sh
+
+[ $? -ne 0 -o -z "$gpod" ] && { echo "No storage pods are running..." ; exit 1 ; }
+[ -z "$1" ] && { echo "Usage: $0 [src] [dst]" ; exit 1 ; }
+action=$1
+shift
+
+
+function info {
+    vol=$1
+
+    status=$(gluster volume info "$vol" | grep -P 'Status' | awk '{ print $2 }' | tr -d '\r\n')
+    bricks=$(gluster volume info "$vol" | grep -P 'Number of Bricks' | awk '{ print $NF }' | tr -d '\r\n')
+    avail=$(gluster volume status "$vol" detail | grep Brick | wc -l)
+    online=$(gluster volume status "$vol" detail | grep Online | grep Y | wc -l)
+
+    echo "Volume $vol: $status (Bricks: $bricks, Available: $avail, Online: $online)"
+}
+
+function heal {
+    vol=$1
+
+    distributed=0
+    gluster volume info "$vol" | grep "Type:" | grep -i "Distribute" &> /dev/null
+    [ $? -eq 0 ] && distributed=1
+
+    echo "Healing volume $vol"
+    echo "-------------------"
+    gluster volume heal "$vol" full
+    gluster volume heal "$vol" info
+
+    if [ $distributed -eq 1 ]; then
+        echo "Rebalancing distributed volume $vol"
+        gluster volume rebalance "$vol" fix-layout start
+    fi
+
+
+    gluster volume status "$vol"
+}
+
+function migrate {
+    vol=$1
+    src=$2
+    dst=$3
+
+    [ -z "$src" -o -z "$dst" ] && { echo "Source and destination servers are required" ; exit 1 ; }
+
+    src_brick=$(gluster volume info $vol | grep -P '^Brick\d+:' | awk '{ print $2 }' | grep -P "^$src" | tr -d '\r\n' )
+    dst_brick=${src_brick/$src/$dst}
+
+    [ -z "$src_brick" -o -z "$dst_brick" ] && return 0
+
+    echo "Volume $vol: migrating failed brick"
+    echo "  from $src_brick"
+    echo "  to $dst_brick"
+    echo "Press enter to continue"
+    read
+    [ $? -ne 0 ] && exit
+
+    gluster volume replace-brick $vol "$src_brick" "$dst_brick" commit force
+    heal $vol
+}
+
+
+function transport {
+    vol=$1
+    transport=${2:-tcp,rdma}
+    echo "Changing $vol to transport $transport"
+    gluster volume stop "$vol"
+    gluster volume set "$vol" config.transport "$transport"
+    gluster volume start "$vol"
+}
+
+
+
+function restart {
+    vol=$1
+
+    echo $vol
+    bricks=$(gluster volume info "$vol" | grep -P 'Number of Bricks' | awk '{ print $NF }' | tr -d '\r\n')
+    online=$(gluster volume status "$vol" detail | grep Online | grep Y | wc -l)
+
+    if [ "$bricks" -ne "$online" ]; then
+        echo "Restarting $vol ($online bricks of $bricks are/is online)"
+        gluster --mode=script volume stop "$vol"
+        gluster --mode=script volume start "$vol"
+    fi
+}
+
+function delete_failed {
+    vol=$1
+
+    bricks=$(gluster volume info "$vol" | grep -P 'Number of Bricks' | awk '{ print $NF }' | tr -d '\r\n')
+    online=$(gluster volume status "$vol" detail | grep Online | grep Y | wc -l)
+
+    if [ "$online" == "0" ]; then
+        echo "Deleting $vol ($online bricks of $bricks are/is online)"
+#        gluster --mode=script volume stop "$vol"
+        gluster --mode=script volume delete "$vol"
+    fi
+}
+
+
+function lvm_clean {
+    used_bricks=`gluster volume info | grep "/brick_" | sed -r -e 's/.*brick_(.*)\/brick/\1/'`
+
+    for ip in $(seq 1 3); do
+        echo "Node $ip"
+        echo "========"
+        lvm_bricks=`node $ip lvscan | grep brick | sed -r -e 's/.*brick_([0-9a-z]*)[^0-9a-z].*/\1/'`
+
+        diff=$(echo $used_bricks $lvm_bricks | tr -d '\r' | tr ' ' '\n' | sort | uniq -u)
+        remove=$(echo "$diff $lvm_bricks" | tr -d '\r' | tr ' ' '\n' | sort | uniq -d)
+
+        for id in $remove; do
+            echo "Removing ---------------------------------------------"
+            node $ip lvs -o name,time,size -S "'name =~ $id'"
+            echo "Removing ---------------------------------------------"
+            node $ip lvremove -y -S "'name =~ $id'"
+        done
+    done
+
+
+}
+
+function lvm_remove_today {
+    for ip in $(seq 1 3); do
+        node $ip hostname
+#lvdisplay -o name,time -S 'time since "2018-03-16"'
+    done
+}
+
+
+function heketi_cmd {
+    heketi "$@"
+}
+
+function heketi_clean {
+    heketi_vols=`heketi topology info | grep "Name: vol_" | sed -r -e 's/.*(vol_[0-9a-z]+)\s*$/\1/'`
+    gluster_vols=`gluster volume info | grep "Name: vol_" | sed -r -e 's/.*(vol_[0-9a-z]+)\s*$/\1/'`
+    echo $heketi_vols
+
+    diff=$(echo $gluster_vols $heketi_vols | tr -d '\r' | tr ' ' '\n' | sort | uniq -u)
+    remove=$(echo "$diff $gluster_vols" | tr -d '\r' | tr ' ' '\n' | sort | uniq -d)
+
+    for vol in $remove; do
+        echo "Stopping and deleting volume $vol"
+        echo "---------------------------------"
+        gluster --mode=script volume stop "$vol"
+        gluster --mode=script volume delete "$vol"
+    done
+}
+
+if [[ "$action" =~ ^heketi ]]; then
+    eval "$action" "$@"
+elif [[ "$action" =~ ^lvm ]]; then
+    eval "$action" "$@"
+elif [[ -z "$1" || "$1" =~ ^all ]]; then
+    all=0
+    [ "$1" == "all_heketi" ] && all=1
+    [[ "$1" =~ ^all ]] && shift
+
+    vols=$(gluster volume info | grep -P '^Volume Name' | awk '{ print $NF }' | tr '\r\n' ' ')
+    for vol in $vols; do
+        if [ $all -eq 0 ]; then
+            [[ "$vol" =~ [0-9] ]] && continue
+            [[ "$vol" =~ ^vol_ ]] && continue
+            [[ "$vol" =~ ^heketi ]] && continue
+        fi
+
+        eval "$action" "$vol" "$@"
+    done
+else
+    eval "$action" "$@"
+fi
diff --git a/scripts/maintain/gluster/opts.sh b/scripts/maintain/gluster/opts.sh
new file mode 100644
index 0000000..2f76d8e
--- /dev/null
+++ b/scripts/maintain/gluster/opts.sh
@@ -0,0 +1,21 @@
+function get_gluster_pod {
+    oc -n glusterfs get pods -l 'glusterfs=storage-pod' | grep Running | awk '{ print $1 }' | head -n 1
+}
+
+gpod=$(get_gluster_pod)
+
+function gluster {
+    oc -n glusterfs rsh po/$gpod gluster "$@"
+}
+
+
+function node {
+    ip=$1
+    shift
+
+    ssh -xq root@192.168.26.$ip "$@"
+}
+
+function heketi {
+    node 1 heketi-cli -s http://heketi-storage.glusterfs.svc.cluster.local:8080 --user admin --secret "$(oc get secret heketi-storage-admin-secret -n glusterfs -o jsonpath='{.data.key}' | base64 -d)" "$@"
+}
diff --git a/scripts/maintain/prunerc.sh b/scripts/maintain/prunerc.sh
new file mode 100644
index 0000000..166ede6
--- /dev/null
+++ b/scripts/maintain/prunerc.sh
@@ -0,0 +1,60 @@
+#! /bin/bash
+
+setup=""
+commit=0
+prune_stopped=0
+
+while [ -n "$1" ]; do
+    case "$1" in
+        -c | --commit ) commit=1; shift ;;
+        -s | --stopped ) prune_stopped=1; shift ;;
+        -n | --setup ) setup="-n $2"; shift 2;;
+        * ) echo "$0 [-n project] [--commit] [--stopped]"; break ;;
+    esac
+done
+
+
+dcs="$(oc $setup get dc | grep -v NAME)"
+
+while read -r dc; do
+    name=$(echo "$dc" | awk '{ print $1 }')
+    revision=$(echo "$dc" | awk '{ print $2 }')
+    dc_desired=$(echo "$dc" | awk '{ print $3 }')
+    dc_current=$(echo "$dc" | awk '{ print $4 }')
+
+    rcname="$name-$revision"
+    rc="$(oc $setup get rc $rcname | grep -v NAME)"
+    desired=$(echo "$rc" | awk '{ print $2 }')
+    current=$(echo "$rc" | awk '{ print $3 }')
+    ready=$(echo "$rc" | awk '{ print $4 }')
+
+    if [ $dc_desired -ne $dc_current ]; then
+        [ $commit -eq 1 ] && echo "Skipping faulty dc/$name"
+        continue
+    elif [ $desired -ne $current -o $desired -ne $ready ]; then
+        [ $commit -eq 1 ] && echo "Skipping not completely ready dc/$name"
+        continue
+    elif [ $desired -eq 0 -a $prune_stopped -ne 1 ]; then
+        [ $commit -eq 1 ] && echo "Skipping stopped dc/$name (last one could be faulty)"
+        continue
+    fi
+
+
+    rcs="$(oc $setup get rc -l openshift.io/deployment-config.name=$name | grep -v NAME)"
+
+    while read -r oldrc; do
+        oldname=$(echo "$oldrc" | awk '{ print $1 }')
+        desired=$(echo "$oldrc" | awk '{ print $2 }')
+
+        [ $oldname = $rcname ] && continue
+        [ $desired -ne 0 ] && continue
+
+        if [ $commit -eq 1 ]; then
+            echo "Removing: $oldname (keeping $rcname[$ready])"
+            oc $setup delete rc "$oldname"
+        else
+            echo "Will remove: $oldname (keeping $rcname[$ready])"
+        fi
+    done <<< "$rcs"
+
+done <<< "$dcs"
diff --git a/scripts/opts.sh b/scripts/opts.sh
deleted file mode 100644
index 2f76d8e..0000000
--- a/scripts/opts.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-function get_gluster_pod {
-    oc -n glusterfs get pods -l 'glusterfs=storage-pod' | grep Running | awk '{ print $1 }' | head -n 1
-}
-
-gpod=$(get_gluster_pod)
-
-function gluster {
-    oc -n glusterfs rsh po/$gpod gluster "$@"
-}
-
-
-function node {
-    ip=$1
-    shift
-
-    ssh -xq root@192.168.26.$ip "$@"
-}
-
-function heketi {
-    node 1 heketi-cli -s http://heketi-storage.glusterfs.svc.cluster.local:8080 --user admin --secret "$(oc get secret heketi-storage-admin-secret -n glusterfs -o jsonpath='{.data.key}' | base64 -d)" "$@"
-}
diff --git a/scripts/provision/hawakular.sh b/scripts/provision/hawakular.sh
new file mode 100755
index 0000000..73e3a87
--- /dev/null
+++ b/scripts/provision/hawakular.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+case $1 in
+    "stop")
+        oc -n openshift-infra scale --replicas 0 rc/hawkular-metrics
+        oc -n openshift-infra scale --replicas 0 rc/heapster
+        oc -n openshift-infra scale --replicas 0 dc/hawkular-cassandra
+        ;;
+    "start")
+        oc -n openshift-infra scale --replicas 0 dc/hawkular-cassandra
+        sleep 1
+        oc -n openshift-infra scale --replicas 0 rc/heapster
+        sleep 1
+        oc -n openshift-infra scale --replicas 0 rc/hawkular-metrics
+        ;;
+    *)
+        echo "Usage: $0 stop/start"
+esac
diff --git a/scripts/provision/kube-ops-view.sh b/scripts/provision/kube-ops-view.sh
new file mode 100755
index 0000000..ca1389e
--- /dev/null
+++ b/scripts/provision/kube-ops-view.sh
@@ -0,0 +1,12 @@
+#! /bin/bash
+
+NS=mon
+
+
+oc -n $NS new-project ocp-ops-view
+oc -n $NS create sa kube-ops-view
+oc -n $NS adm policy add-scc-to-user anyuid -z kube-ops-view
+oc -n $NS adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:mon:kube-ops-view
+oc -n $NS apply -f https://raw.githubusercontent.com/raffaelespazzoli/kube-ops-view/ocp/deploy-openshift/kube-ops-view.yaml
+oc -n $NS expose svc kube-ops-view
+oc -n $NS get route | grep kube-ops-view | awk '{print $2}'
--
cgit v1.2.1
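
Usage sketch (not part of the commit itself): the commands below show how the relocated scripts might be driven after this change, assuming a logged-in oc client with access to the involved projects; the flags and actions are the ones defined in the scripts above, while the project name "mon" is only an example.

    # Preview which outdated replication controllers would be pruned in project "mon",
    # then delete them for real; --stopped also prunes old RCs of DCs currently scaled to zero.
    # prunerc.sh is created with mode 100644, so invoke it via bash.
    bash scripts/maintain/prunerc.sh -n mon
    bash scripts/maintain/prunerc.sh -n mon --commit --stopped

    # gluster.sh sources opts.sh from the working directory, so run it from its own folder.
    cd scripts/maintain/gluster
    ./gluster.sh info            # status summary of the plain (non-heketi) volumes
    ./gluster.sh heal            # full heal, plus a fix-layout rebalance for distributed volumes
    ./gluster.sh heketi_clean    # stop and delete gluster volumes no longer known to heketi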