import (
"context"
"crypto/sha256"
+ "fmt"
+ "github.com/go-logr/logr"
+ "os"
- onapv1alpha1 "demo/vnfs/DAaaS/collectd-operator/pkg/apis/onap/v1alpha1"
+ onapv1alpha1 "demo/vnfs/DAaaS/microservices/collectd-operator/pkg/apis/onap/v1alpha1"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
// ResourceMap to hold objects to update/reload
type ResourceMap struct {
- configMap *corev1.ConfigMap
- daemonSet *extensionsv1beta1.DaemonSet
+ configMap *corev1.ConfigMap
+ daemonSet *extensionsv1beta1.DaemonSet
+ collectdPlugins *[]onapv1alpha1.CollectdPlugin
}
/**
return err
}
- // TODO(user): Modify this to be the types you create that are owned by the primary resource
- // Watch for changes to secondary resource Pods and requeue the owner CollectdPlugin
- log.V(1).Info("Add watcher for secondary resource ConfigMap")
- err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
- IsController: true,
- OwnerType: &onapv1alpha1.CollectdPlugin{},
- })
- if err != nil {
- return err
- }
-
- err = c.Watch(&source.Kind{Type: &extensionsv1beta1.DaemonSet{}}, &handler.EnqueueRequestForOwner{
- IsController: true,
- OwnerType: &onapv1alpha1.CollectdPlugin{},
- })
- if err != nil {
- return err
- }
-
return nil
}
scheme *runtime.Scheme
}
+// Define the collectdPlugin finalizer for handling deletion
+const (
+ defaultWatchLabel = "app=collectd"
+ collectdPluginFinalizer = "finalizer.collectdplugin.onap.org"
+
+ // WatchLabelsEnvVar is the constant for env variable WATCH_LABELS
+ // which is the labels where the watch activity happens.
+ // this value is empty if the operator is running with clusterScope.
+ WatchLabelsEnvVar = "WATCH_LABELS"
+)
+
// Reconcile reads that state of the cluster for a CollectdPlugin object and makes changes based on the state read
// and what is in the CollectdPlugin.Spec
// TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates
return reconcile.Result{}, err
}
- rmap, err := findResourceMapForCR(r, instance)
- if err != nil {
- reqLogger.Info("Skip reconcile: ConfigMap not found")
+ // Handle Delete CR for additional cleanup
+ isDelete, err := r.handleDelete(reqLogger, instance)
+ if isDelete {
return reconcile.Result{}, err
}
+ // Add finalizer for this CR
+ if !contains(instance.GetFinalizers(), collectdPluginFinalizer) {
+ if err := r.addFinalizer(reqLogger, instance); err != nil {
+ return reconcile.Result{}, err
+ }
+ return reconcile.Result{}, nil
+ }
+ err = r.handleCollectdPlugin(reqLogger, instance, false)
+ return reconcile.Result{}, err
+}
+
+// handleCollectdPlugin regenerates the collectd conf on CR Create, Update, Delete events
+func (r *ReconcileCollectdPlugin) handleCollectdPlugin(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin, isDelete bool) error {
+
+ rmap, err := r.findResourceMapForCR(reqLogger, cr)
+ if err != nil {
+ reqLogger.Error(err, "Skip reconcile: Resources not found")
+ return err
+ }
+
cm := rmap.configMap
ds := rmap.daemonSet
+ collectPlugins := rmap.collectdPlugins
reqLogger.V(1).Info("Found ResourceMap")
- reqLogger.V(1).Info("ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name)
- reqLogger.V(1).Info("DaemonSet.Namespace", ds.Namespace, "DaemonSet.Name", ds.Name)
- // Set CollectdPlugin instance as the owner and controller
- if err := controllerutil.SetControllerReference(instance, cm, r.scheme); err != nil {
- return reconcile.Result{}, err
- }
- // Set CollectdConf instance as the owner and controller
- if err := controllerutil.SetControllerReference(instance, ds, r.scheme); err != nil {
- return reconcile.Result{}, err
- }
+ reqLogger.V(1).Info(":::: ConfigMap Info ::::", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name)
+ reqLogger.V(1).Info(":::: DaemonSet Info ::::", "DaemonSet.Namespace", ds.Namespace, "DaemonSet.Name", ds.Name)
+
+ collectdConf, err := rebuildCollectdConf(cr, collectPlugins, isDelete)
+ if err != nil {
+ reqLogger.Error(err, "Rebuild collectd conf script failed")
+ return err
+ }
+
+ //Restart Collectd Pods
+ //Restart only if hash of configmap has changed.
+ ds.Spec.Template.SetAnnotations(map[string]string{
+ "daaas-random": ComputeSHA256([]byte(collectdConf)),
+ })
+ cm.SetAnnotations(map[string]string{
+ "daaas-random": ComputeSHA256([]byte(collectdConf)),
+ })
+
+ cm.Data["node-collectd.conf"] = collectdConf
// Update the ConfigMap with new Spec and reload DaemonSets
reqLogger.Info("Updating the ConfigMap", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name)
- log.Info("Map: ", cm.Data)
+ log.V(1).Info("ConfigMap Data", "Map: ", cm.Data)
err = r.client.Update(context.TODO(), cm)
if err != nil {
- return reconcile.Result{}, err
+ reqLogger.Error(err, "Update the ConfigMap failed", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name)
+ return err
}
- //Restart Collectd Pods
-
- ds.Spec.Template.SetLabels(map[string]string{
- "daaas-random": ComputeSHA256([]byte("TEST")),
- })
+ reqLogger.Info("Reloading the Daemonset", "DaemonSet.Namespace", ds.Namespace, "DaemonSet.Name", ds.Name)
+ err = r.client.Update(context.TODO(), ds)
+ if err != nil {
+ reqLogger.Error(err, "Update the DaemonSet failed", "DaemonSet.Namespace", ds.Namespace, "DaemonSet.Name", ds.Name)
+ return err
+ }
+ // Best-effort status update; log but do not fail the reconcile on error.
+ if err = r.updateStatus(cr); err != nil {
+ reqLogger.Error(err, "Update status of CollectdPlugin failed")
+ }
// Reconcile success
- reqLogger.Info("Updated the ConfigMap", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name)
- return reconcile.Result{}, nil
+ reqLogger.Info("Reconcile success!!")
+ return nil
}
// ComputeSHA256 returns hash of data as string
func ComputeSHA256(data []byte) string {
hash := sha256.Sum256(data)
- return string(hash[:])
+ return fmt.Sprintf("%x", hash)
}
-// findConfigMapForCR returns the configMap used by collectd Daemonset
-func findResourceMapForCR(r *ReconcileCollectdPlugin, cr *onapv1alpha1.CollectdPlugin) (ResourceMap, error) {
+// findResourceMapForCR returns the configMap, collectd Daemonset and list of Collectd Plugins
+func (r *ReconcileCollectdPlugin) findResourceMapForCR(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) (ResourceMap, error) {
cmList := &corev1.ConfigMapList{}
opts := &client.ListOptions{}
rmap := ResourceMap{}
- // Select ConfigMaps with label app=collectd
- opts.SetLabelSelector("app=collectd")
+ // Select ConfigMaps with label
+ labelSelector, err := getWatchLabels()
+ if err != nil {
+ reqLogger.Error(err, "Failed to get watch labels, continuing with default label")
+ }
+ opts.SetLabelSelector(labelSelector)
opts.InNamespace(cr.Namespace)
- err := r.client.List(context.TODO(), opts, cmList)
+
+ err = r.client.List(context.TODO(), opts, cmList)
if err != nil {
return rmap, err
}
- // Select DaemonSets with label app=collectd
- dsList := &extensionsv1beta1.DaemonSet{}
+ if cmList.Items == nil || len(cmList.Items) == 0 {
+ return rmap, errors.NewNotFound(corev1.Resource("configmap"), "ConfigMap")
+ }
+
+ // Select DaemonSets with label
+ dsList := &extensionsv1beta1.DaemonSetList{}
err = r.client.List(context.TODO(), opts, dsList)
if err != nil {
return rmap, err
}
+ if dsList.Items == nil || len(dsList.Items) == 0 {
+ return rmap, errors.NewNotFound(corev1.Resource("daemonset"), "DaemonSet")
+ }
+
+ // Get all collectd plugins in the current namespace to rebuild conf.
+ collectdPlugins := &onapv1alpha1.CollectdPluginList{}
+ cpOpts := &client.ListOptions{}
+ cpOpts.InNamespace(cr.Namespace)
+ err = r.client.List(context.TODO(), cpOpts, collectdPlugins)
+ if err != nil {
+ return rmap, err
+ }
+
rmap.configMap = &cmList.Items[0]
- rmap.daemonSet = dsList
+ rmap.daemonSet = &dsList.Items[0]
+ rmap.collectdPlugins = &collectdPlugins.Items //will be nil if no plugins exist
return rmap, err
}
-// newPodForCR returns a busybox pod with the same name/namespace as the cr
-func newPodForCR(cr *onapv1alpha1.CollectdPlugin) *corev1.Pod {
- labels := map[string]string{
- "app": cr.Name,
- }
- return &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: cr.Name + "-pod",
- Namespace: cr.Namespace,
- Labels: labels,
- },
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
- {
- Name: "busybox",
- Image: "busybox",
- Command: []string{"sleep", "3600"},
- },
- },
- },
+// Get all collectd plugins and reconstruct, compute Hash and check for changes
+func rebuildCollectdConf(cr *onapv1alpha1.CollectdPlugin, cpList *[]onapv1alpha1.CollectdPlugin, isDelete bool) (string, error) {
+ var collectdConf string
+ if *cpList == nil || len(*cpList) == 0 {
+ return "", errors.NewNotFound(corev1.Resource("collectdplugin"), "CollectdPlugin")
+ }
+ loadPlugin := make(map[string]string)
+ for _, cp := range *cpList {
+ if cp.Spec.PluginName == "global" {
+ collectdConf += cp.Spec.PluginConf + "\n"
+ } else {
+ loadPlugin[cp.Spec.PluginName] = cp.Spec.PluginConf
+ }
+ }
+
+ if isDelete {
+ delete(loadPlugin, cr.Spec.PluginName)
+ }
+
+ log.V(1).Info("::::::: Plugins Map ::::::: ", "PluginMap ", loadPlugin)
+
+ for cpName, cpConf := range loadPlugin {
+ collectdConf += "LoadPlugin" + " " + cpName + "\n"
+ collectdConf += cpConf + "\n"
+ }
+
+ collectdConf += "#Last line (collectd requires '\\n' at the last line)\n"
+
+ return collectdConf, nil
+}
+
+// Handle Delete CR event for additional cleanup
+func (r *ReconcileCollectdPlugin) handleDelete(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) (bool, error) {
+ // Check if the CollectdPlugin instance is marked to be deleted, which is
+ // indicated by the deletion timestamp being set.
+ isMarkedToBeDeleted := cr.GetDeletionTimestamp() != nil
+ if isMarkedToBeDeleted {
+ if contains(cr.GetFinalizers(), collectdPluginFinalizer) {
+ // Run finalization logic for collectdPluginFinalizer. If the
+ // finalization logic fails, don't remove the finalizer so
+ // that we can retry during the next reconciliation.
+ if err := r.finalizeCollectdPlugin(reqLogger, cr); err != nil {
+ return isMarkedToBeDeleted, err
+ }
+
+ // Remove collectdPluginFinalizer. Once all finalizers have been
+ // removed, the object will be deleted.
+ cr.SetFinalizers(remove(cr.GetFinalizers(), collectdPluginFinalizer))
+ err := r.client.Update(context.TODO(), cr)
+ if err != nil {
+ return isMarkedToBeDeleted, err
+ }
+ }
+ }
+ return isMarkedToBeDeleted, nil
+}
+
+func (r *ReconcileCollectdPlugin) updateStatus(cr *onapv1alpha1.CollectdPlugin) error {
+ podList := &corev1.PodList{}
+ opts := &client.ListOptions{}
+ // Select ConfigMaps with label
+ labelSelector, _ := getWatchLabels()
+ opts.SetLabelSelector(labelSelector)
+ var pods []string
+ opts.InNamespace(cr.Namespace)
+ err := r.client.List(context.TODO(), opts, podList)
+ if err != nil {
+ return err
+ }
+
+ if podList.Items == nil || len(podList.Items) == 0 {
+ return err
+ }
+
+ for _, pod := range podList.Items {
+ pods = append(pods, pod.Name)
+ }
+ cr.Status.CollectdAgents = pods
+ err = r.client.Status().Update(context.TODO(), cr)
+ return err
+}
+
+func (r *ReconcileCollectdPlugin) finalizeCollectdPlugin(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) error {
+ // Cleanup by regenerating new collectd conf and rolling update of DaemonSet
+ if err := r.handleCollectdPlugin(reqLogger, cr, true); err != nil {
+ reqLogger.Error(err, "Finalize CollectdPlugin failed!!")
+ return err
+ }
+ reqLogger.Info("Successfully finalized CollectdPlugin!!")
+ return nil
+}
+
+func (r *ReconcileCollectdPlugin) addFinalizer(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) error {
+ reqLogger.Info("Adding Finalizer for the CollectdPlugin")
+ cr.SetFinalizers(append(cr.GetFinalizers(), collectdPluginFinalizer))
+
+ // Update CR
+ err := r.client.Update(context.TODO(), cr)
+ if err != nil {
+ reqLogger.Error(err, "Failed to update CollectdPlugin with finalizer")
+ return err
+ }
+ return nil
+}
+
+func contains(list []string, s string) bool {
+ for _, v := range list {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+func remove(list []string, s string) []string {
+ for i, v := range list {
+ if v == s {
+ list = append(list[:i], list[i+1:]...)
+ }
+ }
+ return list
+}
+
+// getWatchLabels returns the labels the operator should be watching for changes
+func getWatchLabels() (string, error) {
+ labelSelector, found := os.LookupEnv(WatchLabelsEnvVar)
+ if !found {
+ return defaultWatchLabel, fmt.Errorf("%s must be set", WatchLabelsEnvVar)
}
+ return labelSelector, nil
}