diff --git a/vnfs/DAaaS/microservices/collectd-operator/pkg/controller/collectdplugin/collectdplugin_controller.go b/vnfs/DAaaS/microservices/collectd-operator/pkg/controller/collectdplugin/collectdplugin_controller.go
index fd35c6f1..11cf0bc1 100644
--- a/vnfs/DAaaS/microservices/collectd-operator/pkg/controller/collectdplugin/collectdplugin_controller.go
+++ b/vnfs/DAaaS/microservices/collectd-operator/pkg/controller/collectdplugin/collectdplugin_controller.go
@@ -3,17 +3,23 @@ package collectdplugin
 import (
 	"context"
 	"crypto/sha256"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+
+	"github.com/go-logr/logr"
 	onapv1alpha1 "demo/vnfs/DAaaS/microservices/collectd-operator/pkg/apis/onap/v1alpha1"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/util/retry"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -25,8 +31,9 @@ var log = logf.Log.WithName("controller_collectdplugin")
 
 // ResourceMap to hold objects to update/reload
 type ResourceMap struct {
-	configMap *corev1.ConfigMap
-	daemonSet *extensionsv1beta1.DaemonSet
+	configMap       *corev1.ConfigMap
+	daemonSet       *extensionsv1beta1.DaemonSet
+	collectdPlugins *[]onapv1alpha1.CollectdPlugin
 }
 
 /**
@@ -61,21 +68,33 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 		return err
 	}
 
-	// TODO(user): Modify this to be the types you create that are owned by the primary resource
-	// Watch for changes to secondary resource Pods and requeue the owner CollectdPlugin
-	log.V(1).Info("Add watcher for secondary resource ConfigMap")
-	err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
-		IsController: true,
-		OwnerType:    &onapv1alpha1.CollectdPlugin{},
-	})
-	if err != nil {
-		return err
-	}
-
-	err = c.Watch(&source.Kind{Type: &extensionsv1beta1.DaemonSet{}}, &handler.EnqueueRequestForOwner{
-		IsController: true,
-		OwnerType:    &onapv1alpha1.CollectdPlugin{},
-	})
+	log.V(1).Info("Add watcher for secondary resource Collectd Daemonset")
+	err = c.Watch(
+		&source.Kind{Type: &appsv1.DaemonSet{}},
+		&handler.EnqueueRequestsFromMapFunc{
+			ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request {
+				labelSelector, err := getWatchLabels()
+				labels := strings.Split(labelSelector, "=")
+				if err != nil {
+					log.Error(err, "Failed to get watch labels, continuing with default label")
+				}
+				rcp := r.(*ReconcileCollectdPlugin)
+				// Select the Daemonset with labelSelector (Default is app=collectd)
+				if a.Meta.GetLabels()[labels[0]] == labels[1] {
+					var requests []reconcile.Request
+					cpList, err := rcp.getCollectdPluginList(a.Meta.GetNamespace())
+					if err != nil {
+						return nil
+					}
+					for _, cp := range cpList.Items {
+						requests = append(requests, reconcile.Request{
+							NamespacedName: client.ObjectKey{Namespace:
cp.Namespace, Name: cp.Name}}) + } + return requests + } + return nil + }), + }) if err != nil { return err } @@ -83,6 +102,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return nil } + // blank assignment to verify that ReconcileCollectdPlugin implements reconcile.Reconciler var _ reconcile.Reconciler = &ReconcileCollectdPlugin{} @@ -94,6 +114,17 @@ type ReconcileCollectdPlugin struct { scheme *runtime.Scheme } +// Define the collectdPlugin finalizer for handling deletion +const ( + defaultWatchLabel = "app=collectd" + collectdPluginFinalizer = "finalizer.collectdplugin.onap.org" + + // WatchLabelsEnvVar is the constant for env variable WATCH_LABELS + // which is the labels where the watch activity happens. + // this value is empty if the operator is running with clusterScope. + WatchLabelsEnvVar = "WATCH_LABELS" +) + // Reconcile reads that state of the cluster for a CollectdPlugin object and makes changes based on the state read // and what is in the CollectdPlugin.Spec // TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates @@ -121,95 +152,321 @@ func (r *ReconcileCollectdPlugin) Reconcile(request reconcile.Request) (reconcil return reconcile.Result{}, err } - rmap, err := findResourceMapForCR(r, instance) - if err != nil { - reqLogger.Info("Skip reconcile: ConfigMap not found") + // Handle Delete CR for additional cleanup + isDelete, err := r.handleDelete(reqLogger, instance) + if isDelete { return reconcile.Result{}, err } - cm := rmap.configMap - ds := rmap.daemonSet - reqLogger.V(1).Info("Found ResourceMap") - reqLogger.V(1).Info("ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name) - reqLogger.V(1).Info("DaemonSet.Namespace", ds.Namespace, "DaemonSet.Name", ds.Name) - // Set CollectdPlugin instance as the owner and controller - if err := controllerutil.SetControllerReference(instance, cm, r.scheme); err != nil { - return reconcile.Result{}, err + // Add finalizer for this CR + if !contains(instance.GetFinalizers(), collectdPluginFinalizer) { + if err := r.addFinalizer(reqLogger, instance); err != nil { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil } - // Set CollectdConf instance as the owner and controller - if err := controllerutil.SetControllerReference(instance, ds, r.scheme); err != nil { - return reconcile.Result{}, err + // Handle the reconciliation for CollectdPlugin. 
+ // At this stage the Status of the CollectdPlugin should NOT be "" + err = r.handleCollectdPlugin(reqLogger, instance, false) + return reconcile.Result{}, err +} + +// handleCollectdPlugin regenerates the collectd conf on CR Create, Update, Delete events +func (r *ReconcileCollectdPlugin) handleCollectdPlugin(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin, isDelete bool) error { + + rmap, err := r.findResourceMapForCR(reqLogger, cr) + if err != nil { + reqLogger.Error(err, "Skip reconcile: Resources not found") + return err } + cm := rmap.configMap + collectPlugins := rmap.collectdPlugins + reqLogger.V(1).Info("Found ResourceMap") + reqLogger.V(1).Info(":::: ConfigMap Info ::::", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name) + + collectdConf, err := rebuildCollectdConf(cr, collectPlugins, isDelete) + + cm.SetAnnotations(map[string]string{ + "daaas-random": ComputeSHA256([]byte(collectdConf)), + }) + + cm.Data["node-collectd.conf"] = collectdConf + // Update the ConfigMap with new Spec and reload DaemonSets reqLogger.Info("Updating the ConfigMap", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name) - log.Info("Map: ", cm.Data) + log.V(1).Info("ConfigMap Data", "Map: ", cm.Data) err = r.client.Update(context.TODO(), cm) if err != nil { - return reconcile.Result{}, err + reqLogger.Error(err, "Update the ConfigMap failed", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name) + return err } - //Restart Collectd Pods + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Daemonset before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + // Select DaemonSets with label + dsList := &extensionsv1beta1.DaemonSetList{} + opts := &client.ListOptions{} + labelSelector, err := getWatchLabels() + if err != nil { + reqLogger.Error(err, "Failed to get watch labels, continuing with default label") + } + opts.SetLabelSelector(labelSelector) + opts.InNamespace(cr.Namespace) + err = r.client.List(context.TODO(), opts, dsList) + if err != nil { + panic(fmt.Errorf("Failed to get latest version of DaemonSet: %v", err)) + } - ds.Spec.Template.SetLabels(map[string]string{ - "daaas-random": ComputeSHA256([]byte("TEST")), + if dsList.Items == nil || len(dsList.Items) == 0 { + return errors.NewNotFound(corev1.Resource("daemonset"), "DaemonSet") + } + ds := &dsList.Items[0] + //Restart Collectd Pods + reqLogger.Info("Reloading the Daemonset", "DaemonSet.Namespace", ds.Namespace, "DaemonSet.Name", ds.Name) + //Restart only if hash of conf has changed. 
+ ds.Spec.Template.SetAnnotations(map[string]string{ + "daaas-random": ComputeSHA256([]byte(collectdConf)), + }) + updateErr := r.client.Update(context.TODO(), ds) + return updateErr }) + if retryErr != nil { + panic(fmt.Errorf("Update failed: %v", retryErr)) + } + + err = r.updateStatus(cr) + if err != nil { + reqLogger.Error(err, "Unable to update status") + return err + } // Reconcile success - reqLogger.Info("Updated the ConfigMap", "ConfigMap.Namespace", cm.Namespace, "ConfigMap.Name", cm.Name) - return reconcile.Result{}, nil + reqLogger.Info("Reconcile success!!") + return nil } // ComputeSHA256 returns hash of data as string func ComputeSHA256(data []byte) string { hash := sha256.Sum256(data) - return string(hash[:]) + return fmt.Sprintf("%x", hash) } -// findConfigMapForCR returns the configMap used by collectd Daemonset -func findResourceMapForCR(r *ReconcileCollectdPlugin, cr *onapv1alpha1.CollectdPlugin) (ResourceMap, error) { +// findResourceMapForCR returns the configMap, collectd Daemonset and list of Collectd Plugins +func (r *ReconcileCollectdPlugin) findResourceMapForCR(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) (ResourceMap, error) { cmList := &corev1.ConfigMapList{} opts := &client.ListOptions{} rmap := ResourceMap{} - // Select ConfigMaps with label app=collectd - opts.SetLabelSelector("app=collectd") + // Select ConfigMaps with label + labelSelector, err := getWatchLabels() + if err != nil { + reqLogger.Error(err, "Failed to get watch labels, continuing with default label") + } + opts.SetLabelSelector(labelSelector) opts.InNamespace(cr.Namespace) - err := r.client.List(context.TODO(), opts, cmList) + + err = r.client.List(context.TODO(), opts, cmList) if err != nil { return rmap, err } - // Select DaemonSets with label app=collectd - dsList := &extensionsv1beta1.DaemonSet{} + if cmList.Items == nil || len(cmList.Items) == 0 { + return rmap, errors.NewNotFound(corev1.Resource("configmap"), "ConfigMap") + } + + // Select DaemonSets with label + dsList := &extensionsv1beta1.DaemonSetList{} err = r.client.List(context.TODO(), opts, dsList) if err != nil { return rmap, err } + if dsList.Items == nil || len(dsList.Items) == 0 { + return rmap, errors.NewNotFound(corev1.Resource("daemonset"), "DaemonSet") + } + + // Get all collectd plugins in the current namespace to rebuild conf. 
+ cpList, err := r.getCollectdPluginList(cr.Namespace) + if err != nil { + return rmap, err + } + rmap.configMap = &cmList.Items[0] - rmap.daemonSet = dsList + rmap.daemonSet = &dsList.Items[0] + rmap.collectdPlugins = &cpList.Items //will be nil if no plugins exist return rmap, err } -// newPodForCR returns a busybox pod with the same name/namespace as the cr -func newPodForCR(cr *onapv1alpha1.CollectdPlugin) *corev1.Pod { - labels := map[string]string{ - "app": cr.Name, - } - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: cr.Name + "-pod", - Namespace: cr.Namespace, - Labels: labels, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "busybox", - Image: "busybox", - Command: []string{"sleep", "3600"}, - }, - }, - }, +// Get all collectd plugins and reconstruct, compute Hash and check for changes +func rebuildCollectdConf(cr *onapv1alpha1.CollectdPlugin, cpList *[]onapv1alpha1.CollectdPlugin, isDelete bool) (string, error) { + var collectdConf string + if *cpList == nil || len(*cpList) == 0 { + return "", errors.NewNotFound(corev1.Resource("collectdplugin"), "CollectdPlugin") + } + loadPlugin := make(map[string]string) + for _, cp := range *cpList { + if cp.Spec.PluginName == "global" { + collectdConf += cp.Spec.PluginConf + "\n" + } else { + loadPlugin[cp.Spec.PluginName] = cp.Spec.PluginConf + } + } + + if isDelete { + delete(loadPlugin, cr.Spec.PluginName) + } + + log.V(1).Info("::::::: Plugins Map ::::::: ", "PluginMap ", loadPlugin) + + for cpName, cpConf := range loadPlugin { + collectdConf += "LoadPlugin" + " " + cpName + "\n" + collectdConf += cpConf + "\n" + } + + collectdConf += "#Last line (collectd requires ‘\\n’ at the last line)\n" + + return collectdConf, nil +} + +// Handle Delete CR event for additional cleanup +func (r *ReconcileCollectdPlugin) handleDelete(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) (bool, error) { + // Check if the CollectdPlugin instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. + isMarkedToBeDeleted := cr.GetDeletionTimestamp() != nil + if isMarkedToBeDeleted { + // Update status to Deleting state + cr.Status.Status = onapv1alpha1.Deleting + cr.Status.CollectdAgents = nil + _ = r.client.Status().Update(context.TODO(), cr) + + if contains(cr.GetFinalizers(), collectdPluginFinalizer) { + // Run finalization logic for collectdPluginFinalizer. If the + // finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. + if err := r.finalizeCollectdPlugin(reqLogger, cr); err != nil { + return isMarkedToBeDeleted, err + } + + // Remove collectdPluginFinalizer. Once all finalizers have been + // removed, the object will be deleted. 
+ cr.SetFinalizers(remove(cr.GetFinalizers(), collectdPluginFinalizer)) + err := r.client.Update(context.TODO(), cr) + if err != nil { + return isMarkedToBeDeleted, err + } + } + } + return isMarkedToBeDeleted, nil +} + +func (r *ReconcileCollectdPlugin) updateStatus(cr *onapv1alpha1.CollectdPlugin) error { + switch cr.Status.Status { + case onapv1alpha1.Initial: + cr.Status.Status = onapv1alpha1.Created + case onapv1alpha1.Created, onapv1alpha1.Enabled: + pods, err := r.getPodList(cr.Namespace) + if err != nil { + return err + } + if !reflect.DeepEqual(pods, cr.Status.CollectdAgents) { + cr.Status.CollectdAgents = pods + cr.Status.Status = onapv1alpha1.Enabled + } + case onapv1alpha1.Deleting, onapv1alpha1.Deprecated: + return nil + } + err := r.client.Status().Update(context.TODO(), cr) + return err +} + +func (r *ReconcileCollectdPlugin) finalizeCollectdPlugin(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) error { + // Cleanup by regenerating new collectd conf and rolling update of DaemonSet + if err := r.handleCollectdPlugin(reqLogger, cr, true); err != nil { + reqLogger.Error(err, "Finalize CollectdPlugin failed!!") + return err + } + reqLogger.Info("Successfully finalized CollectdPlugin!!") + return nil +} + +func (r *ReconcileCollectdPlugin) addFinalizer(reqLogger logr.Logger, cr *onapv1alpha1.CollectdPlugin) error { + reqLogger.Info("Adding Finalizer for the CollectdPlugin") + cr.SetFinalizers(append(cr.GetFinalizers(), collectdPluginFinalizer)) + // Update status from Initial to Created + // Since addFinalizer will be executed only once, + // the status will be changed from Initial state to Created + updateErr := r.updateStatus(cr) + if updateErr != nil { + reqLogger.Error(updateErr, "Failed to update status from Initial state") + } + // Update CR + err := r.client.Update(context.TODO(), cr) + if err != nil { + reqLogger.Error(err, "Failed to update CollectdPlugin with finalizer") + return err + } + return nil +} + +func contains(list []string, s string) bool { + for _, v := range list { + if v == s { + return true + } + } + return false +} + +func remove(list []string, s string) []string { + for i, v := range list { + if v == s { + list = append(list[:i], list[i+1:]...) + } + } + return list +} + +// getWatchLabels returns the labels the operator should be watching for changes +func getWatchLabels() (string, error) { + labelSelector, found := os.LookupEnv(WatchLabelsEnvVar) + if !found { + return defaultWatchLabel, fmt.Errorf("%s must be set", WatchLabelsEnvVar) + } + return labelSelector, nil +} + +func (r *ReconcileCollectdPlugin) getPodList(ns string) ([]string, error) { + var pods []string + podList := &corev1.PodList{} + opts := &client.ListOptions{} + // Select ConfigMaps with label + labelSelector, _ := getWatchLabels() + opts.SetLabelSelector(labelSelector) + opts.InNamespace(ns) + err := r.client.List(context.TODO(), opts, podList) + if err != nil { + return nil, err + } + + if podList.Items == nil || len(podList.Items) == 0 { + return nil, err + } + + for _, pod := range podList.Items { + pods = append(pods, pod.Name) + } + return pods, nil +} + +func (r *ReconcileCollectdPlugin) getCollectdPluginList(ns string) (*onapv1alpha1.CollectdPluginList, error) { + // Get all collectd plugins in the current namespace to rebuild conf. 
+ collectdPlugins := &onapv1alpha1.CollectdPluginList{} + cpOpts := &client.ListOptions{} + cpOpts.InNamespace(ns) + err := r.client.List(context.TODO(), cpOpts, collectdPlugins) + if err != nil { + return nil, err } + return collectdPlugins, nil }
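
The reload mechanism in this patch hinges on hashing the regenerated collectd.conf with ComputeSHA256 and stamping the digest on the DaemonSet pod template as the "daaas-random" annotation, so collectd pods roll only when the rendered configuration actually changes. A minimal standalone sketch of that technique, not part of the patch itself; the sample conf strings are made up for illustration:

// Sketch of the hash-based reload trigger used by the controller above.
package main

import (
	"crypto/sha256"
	"fmt"
)

// computeSHA256 mirrors ComputeSHA256 in the patch: hex-encode the SHA-256 digest.
func computeSHA256(data []byte) string {
	hash := sha256.Sum256(data)
	return fmt.Sprintf("%x", hash)
}

func main() {
	// Two hypothetical renderings of node-collectd.conf before and after a CR update.
	oldConf := "LoadPlugin cpu\n<Plugin cpu>\n</Plugin>\n"
	newConf := oldConf + "LoadPlugin memory\n<Plugin memory>\n</Plugin>\n"

	oldAnnotation := computeSHA256([]byte(oldConf))
	newAnnotation := computeSHA256([]byte(newConf))

	// An unchanged conf keeps the pod-template annotation stable (no rollout);
	// any change to the conf changes the hash and forces a rolling restart.
	fmt.Println("rollout needed:", oldAnnotation != newAnnotation)
}

The label selector used throughout the patch comes from getWatchLabels: the WATCH_LABELS environment variable when the operator's deployment sets it, otherwise the default app=collectd. A small sketch of that lookup behavior; the variable name and fallback value come from the patch, the custom label value is hypothetical:

// Sketch of the WATCH_LABELS lookup with its app=collectd fallback.
package main

import (
	"fmt"
	"os"
)

// watchLabels returns the selector from WATCH_LABELS, or the default when unset.
func watchLabels() string {
	if v, ok := os.LookupEnv("WATCH_LABELS"); ok {
		return v
	}
	return "app=collectd"
}

func main() {
	fmt.Println("selector without env:", watchLabels())

	_ = os.Setenv("WATCH_LABELS", "app=collectd-custom")
	fmt.Println("selector with env:   ", watchLabels())
}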