[TOOLS] Use release's chart manifest to collect kubernetes objects
author    Bartek Grzybowski <b.grzybowski@partner.samsung.com>
          Mon, 28 Jun 2021 13:06:58 +0000 (15:06 +0200)
committer Bartek Grzybowski <b.grzybowski@partner.samsung.com>
          Thu, 1 Jul 2021 11:50:16 +0000 (11:50 +0000)
Since Helm v3 no longer shows the kubernetes objects associated with a release,
the chart's manifest is used to collect them instead.

Ref.: https://github.com/helm/helm/issues/5952
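
In practice the change boils down to the pipeline below; a minimal sketch,
using 'onap' and 'so' as illustrative namespace and release names:

    # Render the manifest Helm stored for the release and have kubectl look
    # up every object it defines, keeping only the kind/name token per line.
    helm -n onap get manifest so | kubectl -n onap get -f - \
        | awk '{print $1}' | grep -v NAME | grep -v '^$'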

Change-Id: Idf16124663186b8d5b8ce2b408bdf7d399b12b48
Issue-ID: OOM-2770
Signed-off-by: Bartek Grzybowski <b.grzybowski@partner.samsung.com>
tools/helm-healer.sh

index a6870fe..650c7d1 100755 (executable)
@@ -222,34 +222,7 @@ delete_job()
 #arg: <component>
 get_resources_for_component()
 {
-
-helm -n ${NAMESPACE} status $1 | awk -f <(cat - <<-'EOD'
-BEGIN {
-  work="no"
-  kind=""
-  a["dummy"]=""
-}
-
-$1 ~ ":" {
-  if ( $1 == "RESOURCES:" ) {
-          work="yes"
-} else {
-  work="no"
-}
-
-}
-
-$1 == "==>" {
-  split($2, a, "[/(]")
-  kind=a[2]
-}
-
-$1 != "NAME" && $1 != "==>" && work == "yes" && $1 !~ ":" && $1 != "" {
-  printf "%s/%s\n", kind, $1
-}
-
-EOD
-)
+    helm -n ${NAMESPACE} get manifest $1 | kubectl -n ${NAMESPACE} get -f - | awk '{print $1}' | grep -v NAME | grep -v ^$
 }
 
 # arg: <resource>
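
For reference, the rewritten get_resources_for_component() above now yields
the lowercase, group-qualified tokens that kubectl prints, one per line;
purely illustrative output:

    deployment.apps/so-bpmn-infra
    service/so
    configmap/so-appconfig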
@@ -259,8 +232,7 @@ delete_resource()
     local _kind="${_resource%/*}"
     local _name="${_resource#*/}"
 
-
-    if kubectl get ${_resource} >/dev/null 2>&1; then
+    if kubectl -n ${NAMESPACE} get ${_resource} >/dev/null 2>&1; then
         msg "${_resource} has not been removed with helm undeploy, manual removal is required. Proceeding"
         kubectl delete ${_resource} -n ${NAMESPACE} \
             --cascade=true \
@@ -378,19 +350,18 @@ undeploy_component()
 
     for resource in ${_component_resources[@]}; do
         case $resource in
-            CronJob/* | Job/* | Secret/* | ConfigMap/* | Pod/* | Service/* | Deployment/* | StatefulSet/*)
+            cronjob/* | job.batch/* | secret/* | configmap/* | service/* | deployment.apps/* | statefulset.apps/* | serviceaccount/* | rolebinding.rbac.authorization.k8s.io/* | role.rbac.authorization.k8s.io/* | poddisruptionbudget.policy/* | clusterrolebinding.rbac.authorization.k8s.io/*)
                 _standard+=(${resource});;
             #Ignoring PVC, they will be handled along with PV as 'helm' status does not return them for some components
-            PersistentVolumeClaim/*)
+            persistentvolumeclaim/*)
                 ;;
-            PersistentVolume/*)
+            persistentvolume/*)
                 _persistent_volumes+=(${resource});;
             *)
                 _unknown_kinds+=(${resource})
         esac
     done
 
-
     #Gathering physical location of directories for persistent volumes to delete them after undeploy
     declare -a _physical_locations
     for volume in ${_persistent_volumes[@]}; do
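
The case patterns above switch from CamelCase kinds to lowercase,
group-qualified ones because the names now come from kubectl rather than from
'helm status'. A quick way to see the format kubectl emits, assuming an
illustrative namespace 'onap':

    # Prints e.g. deployment.apps/so-bpmn-infra, matching the new patterns;
    # the old awk parser produced tokens such as Deployment/so-bpmn-infra.
    kubectl -n onap get deployments,services,configmaps -o name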