}
// getPodsByLabel yields status of all pods under given instance ID
-func (k *KubernetesClient) getPodsByLabel(namespace string) ([]ResourceStatus, error) {
+func (k *KubernetesClient) getPodsByLabel(ctx context.Context, namespace string) ([]ResourceStatus, error) {
client := k.GetStandardClient().CoreV1().Pods(namespace)
listOpts := metav1.ListOptions{
LabelSelector: config.GetConfiguration().KubernetesLabelName + "=" + k.instanceID,
}
- podList, err := client.List(context.TODO(), listOpts)
+ podList, err := client.List(ctx, listOpts)
if err != nil {
return nil, pkgerrors.Wrap(err, "Retrieving PodList from cluster")
}
return resp, nil
}
-func (k *KubernetesClient) queryResources(apiVersion, kind, labelSelector, namespace string) ([]ResourceStatus, error) {
+func (k *KubernetesClient) queryResources(ctx context.Context, apiVersion, kind, labelSelector, namespace string) ([]ResourceStatus, error) {
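+	// Start a tracing span covering this query; it ends when the function returns.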
+ ctx, span := tracer.Start(ctx, "KubernetesClient.queryResources")
+ defer span.End()
+
dynClient := k.GetDynamicClient()
mapper := k.GetMapper()
gvk := schema.FromAPIVersionAndKind(apiVersion, kind)
LabelSelector: labelSelector,
}
var unstrList *unstructured.UnstructuredList
switch mapping.Scope.Name() {
case meta.RESTScopeNameNamespace:
- unstrList, err = dynClient.Resource(gvr).Namespace(namespace).List(context.TODO(), opts)
+ unstrList, err = dynClient.Resource(gvr).Namespace(namespace).List(ctx, opts)
case meta.RESTScopeNameRoot:
- unstrList, err = dynClient.Resource(gvr).List(context.TODO(), opts)
+ unstrList, err = dynClient.Resource(gvr).List(ctx, opts)
default:
return nil, pkgerrors.New("Got an unknown RESTScopeName for mapping: " + gvk.String())
}
}
-// GetResourcesStatus yields status of given generic resource
+// GetResourceStatus yields the status of the given generic resource
-func (k *KubernetesClient) GetResourceStatus(res helm.KubernetesResource, namespace string) (ResourceStatus, error) {
+func (k *KubernetesClient) GetResourceStatus(ctx context.Context, res helm.KubernetesResource, namespace string) (ResourceStatus, error) {
+ ctx, span := tracer.Start(ctx, "KubernetesClient.GetResourceStatus")
+ defer span.End()
dynClient := k.GetDynamicClient()
mapper := k.GetMapper()
mapping, err := mapper.RESTMapping(schema.GroupKind{
var unstruct *unstructured.Unstructured
switch mapping.Scope.Name() {
case meta.RESTScopeNameNamespace:
- unstruct, err = dynClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), res.Name, opts)
+ unstruct, err = dynClient.Resource(gvr).Namespace(namespace).Get(ctx, res.Name, opts)
case meta.RESTScopeNameRoot:
- unstruct, err = dynClient.Resource(gvr).Get(context.TODO(), res.Name, opts)
+ unstruct, err = dynClient.Resource(gvr).Get(ctx, res.Name, opts)
default:
return ResourceStatus{}, pkgerrors.New("Got an unknown RESTSCopeName for mapping: " + res.GVK.String())
}
// getKubeConfig uses the connectivity client to get the kubeconfig based on the name
// of the cloudregion. This is written out to a file.
-func (k *KubernetesClient) getKubeConfig(cloudregion string) (string, error) {
+func (k *KubernetesClient) getKubeConfig(ctx context.Context, cloudregion string) (string, error) {
+ ctx, span := tracer.Start(ctx, "KubernetesClient.getKubeConfig")
+ defer span.End()
conn := connection.NewConnectionClient()
- kubeConfigPath, err := conn.Download(context.TODO(), cloudregion)
+ kubeConfigPath, err := conn.Download(ctx, cloudregion)
if err != nil {
return "", pkgerrors.Wrap(err, "Downloading kubeconfig")
}
return kubeConfigPath, nil
}
-// Init loads the Kubernetes configuation values stored into the local configuration file
-func (k *KubernetesClient) Init(cloudregion string, iid string) error {
+// Init loads the Kubernetes configuration values stored in the local configuration file
+func (k *KubernetesClient) Init(ctx context.Context, cloudregion string, iid string) error {
+ ctx, span := tracer.Start(ctx, "KubernetesClient.Init")
+ defer span.End()
+
if cloudregion == "" {
return pkgerrors.New("Cloudregion is empty")
}
k.instanceID = iid
- configPath, err := k.getKubeConfig(cloudregion)
+ configPath, err := k.getKubeConfig(ctx, cloudregion)
if err != nil {
return pkgerrors.Wrap(err, "Get kubeconfig file")
}
return nil
}
-func (k *KubernetesClient) ensureNamespace(namespace string) error {
+func (k *KubernetesClient) ensureNamespace(ctx context.Context, namespace string) error {
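+	// Record a span for this call; the derived context is not used further here.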
+ _, span := tracer.Start(ctx, "KubernetesClient.ensureNamespace")
+ defer span.End()
pluginImpl, err := plugin.GetPluginByKind("Namespace")
if err != nil {
return nil
}
-func (k *KubernetesClient) CreateKind(resTempl helm.KubernetesResourceTemplate, namespace string) (helm.KubernetesResource, error) {
+func (k *KubernetesClient) CreateKind(ctx context.Context, resTempl helm.KubernetesResourceTemplate, namespace string) (helm.KubernetesResource, error) {
+ _, span := tracer.Start(ctx, "KubernetesClient.CreateKind")
+ defer span.End()
if _, err := os.Stat(resTempl.FilePath); os.IsNotExist(err) {
return helm.KubernetesResource{}, pkgerrors.New("File " + resTempl.FilePath + "does not exists")
}, nil
}
-func (k *KubernetesClient) updateKind(resTempl helm.KubernetesResourceTemplate,
+func (k *KubernetesClient) updateKind(ctx context.Context, resTempl helm.KubernetesResourceTemplate,
namespace string, createIfDoNotExist bool) (helm.KubernetesResource, error) {
+ _, span := tracer.Start(ctx, "KubernetesClient.updateKind")
+ defer span.End()
if _, err := os.Stat(resTempl.FilePath); os.IsNotExist(err) {
return helm.KubernetesResource{}, pkgerrors.New("File " + resTempl.FilePath + " does not exists")
}, nil
}
-func (k *KubernetesClient) createResources(sortedTemplates []helm.KubernetesResourceTemplate,
+func (k *KubernetesClient) createResources(ctx context.Context, sortedTemplates []helm.KubernetesResourceTemplate,
namespace string) ([]helm.KubernetesResource, error) {
+ ctx, span := tracer.Start(ctx, "KubernetesClient.createResources")
+ defer span.End()
var createdResources []helm.KubernetesResource
- err := k.ensureNamespace(namespace)
+ err := k.ensureNamespace(ctx, namespace)
if err != nil {
return createdResources, pkgerrors.Wrap(err, "Creating Namespace")
}
for _, resTempl := range sortedTemplates {
- resCreated, err := k.CreateKind(resTempl, namespace)
+ resCreated, err := k.CreateKind(ctx, resTempl, namespace)
if err != nil {
return createdResources, pkgerrors.Wrapf(err, "Error creating kind: %+v", resTempl.GVK)
}
return createdResources, nil
}
-func (k *KubernetesClient) updateResources(sortedTemplates []helm.KubernetesResourceTemplate,
+func (k *KubernetesClient) updateResources(ctx context.Context, sortedTemplates []helm.KubernetesResourceTemplate,
namespace string, createIfDoNotExist bool) ([]helm.KubernetesResource, error) {
+ ctx, span := tracer.Start(ctx, "KubernetesClient.updateResources")
+ defer span.End()
- err := k.ensureNamespace(namespace)
+ err := k.ensureNamespace(ctx, namespace)
if err != nil {
return nil, pkgerrors.Wrap(err, "Creating Namespace")
}
var updatedResources []helm.KubernetesResource
for _, resTempl := range sortedTemplates {
- resUpdated, err := k.updateKind(resTempl, namespace, createIfDoNotExist)
+ resUpdated, err := k.updateKind(ctx, resTempl, namespace, createIfDoNotExist)
if err != nil {
return nil, pkgerrors.Wrapf(err, "Error updating kind: %+v", resTempl.GVK)
}
return nil
}
-func (k *KubernetesClient) deleteResources(resources []helm.KubernetesResource, namespace string) error {
+func (k *KubernetesClient) deleteResources(ctx context.Context, resources []helm.KubernetesResource, namespace string) error {
+ _, deleteSpan := tracer.Start(ctx, "KubernetesClient.deleteResources")
+ defer deleteSpan.End()
//TODO: Investigate if deletion should be in a particular order
for _, res := range resources {
err := k.DeleteKind(res, namespace)
package app
import (
+ "context"
"encoding/base64"
"io/ioutil"
"os"
kubeClient := KubernetesClient{}
// Refer to the connection via its name
- err = kubeClient.Init("mock_connection", "abcdefg")
+ err = kubeClient.Init(context.TODO(), "mock_connection", "abcdefg")
if err != nil {
t.Fatalf("TestGetKubeClient returned an error (%s)", err)
}
},
}
- _, err := k8.createResources(data, "testnamespace")
+ _, err := k8.createResources(context.TODO(), data, "testnamespace")
if err != nil {
t.Fatalf("TestCreateResources returned an error (%s)", err)
}
},
}
- err := k8.deleteResources(data, "test")
+ err := k8.deleteResources(context.TODO(), data, "test")
if err != nil {
t.Fatalf("TestCreateVNF returned an error (%s)", err)
}
// Keep thread running
log.Printf("[scheduleResources]: START thread")
for {
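+		// Placeholder context shared by the instance lookup and Kubernetes calls in this iteration.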
+ ctx := context.TODO()
data := <-c
//TODO: ADD Check to see if Application running
ic := NewInstanceClient()
- resp, err := ic.Find(context.TODO(), data.profile.RBName, data.profile.RBVersion, data.profile.ProfileName, nil)
+ resp, err := ic.Find(ctx, data.profile.RBName, data.profile.RBVersion, data.profile.ProfileName, nil)
if (err != nil || len(resp) == 0) && data.action != "STOP" {
log.Println("Error finding a running instance. Retrying later...")
data.updatedResources <- []KubernetesConfigResource{}
resources := []KubernetesConfigResource{}
for _, inst := range resp {
k8sClient := KubernetesClient{}
- err = k8sClient.Init(inst.Request.CloudRegion, inst.ID)
+ err = k8sClient.Init(ctx, inst.Request.CloudRegion, inst.ID)
if err != nil {
log.Printf("Getting CloudRegion Information: %s", err.Error())
//Move onto the next cloud region
}
for _, res := range data.resourceTemplates {
var resToCreateOrUpdate = []helm.KubernetesResourceTemplate{res}
- resProceeded, err := k8sClient.createResources(resToCreateOrUpdate, inst.Namespace)
+ resProceeded, err := k8sClient.createResources(ctx, resToCreateOrUpdate, inst.Namespace)
errCreate := err
var status string = ""
if err != nil {
- // assuming - the err represent the resource already exist, so going for update
+ // assuming the error means the resource already exists, so try an update instead
- resProceeded, err = k8sClient.updateResources(resToCreateOrUpdate, inst.Namespace, false)
+ resProceeded, err = k8sClient.updateResources(ctx, resToCreateOrUpdate, inst.Namespace, false)
if err != nil {
log.Printf("Error Creating resources: %s", errCreate.Error())
log.Printf("Error Updating resources: %s", err.Error())
log.Printf("[scheduleResources]: DELETE %v %v", data.profile, data.resources)
for _, inst := range resp {
k8sClient := KubernetesClient{}
- err = k8sClient.Init(inst.Request.CloudRegion, inst.ID)
+ err = k8sClient.Init(ctx, inst.Request.CloudRegion, inst.ID)
if err != nil {
log.Printf("Getting CloudRegion Information: %s", err.Error())
//Move onto the next cloud region
for _, res := range data.resources {
tmpResources = append(tmpResources, res.Resource)
}
- err = k8sClient.deleteResources(helm.GetReverseK8sResources(tmpResources), inst.Namespace)
+ err = k8sClient.deleteResources(ctx, helm.GetReverseK8sResources(tmpResources), inst.Namespace)
if err != nil {
log.Printf("Error Deleting resources: %s", err.Error())
continue
timeout int64,
startIndex int,
dbData *InstanceDbData) error {
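+	// ExecHook has no caller-supplied context, so use a placeholder for the hook operations below.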
+ ctx := context.TODO()
executingHooks := hc.getHookByEvent(hs, hook)
key := InstanceKey{
ID: hc.id,
GVK: h.KRT.GVK,
FilePath: h.KRT.FilePath,
}
- createdHook, err := k8sClient.CreateKind(resTempl, hc.kubeNameSpace)
+ createdHook, err := k8sClient.CreateKind(ctx, resTempl, hc.kubeNameSpace)
if err != nil {
log.Printf(" Instance: %s, Warning: %s hook %s, filePath: %s, error: %s", hc.id, hook, h.Hook.Name, h.KRT.FilePath, err)
hc.deleteHookByPolicy(h, release.HookFailed, k8sClient)
}
if hookHasDeletePolicy(h, policy) {
log.Printf(" Instance: %s, Deleting hook %s due to %q policy", hc.id, h.Hook.Name, policy)
- if errHookDelete := k8sClient.deleteResources(append([]helm.KubernetesResource{}, rss), hc.kubeNameSpace); errHookDelete != nil {
+ if errHookDelete := k8sClient.deleteResources(context.TODO(), append([]helm.KubernetesResource{}, rss), hc.kubeNameSpace); errHookDelete != nil {
if strings.Contains(errHookDelete.Error(), "not found") {
return nil
} else {
isDeleted := false
for !isDeleted {
log.Printf(" Instance: %s, Waiting on deleting hook %s for release %s due to %q policy", hc.id, h.Hook.Name, hc.id, policy)
- if _, err := k8sClient.GetResourceStatus(rss, hc.kubeNameSpace); err != nil {
+ if _, err := k8sClient.GetResourceStatus(context.TODO(), rss, hc.kubeNameSpace); err != nil {
if strings.Contains(err.Error(), "not found") {
log.Printf(" Instance: %s, Deleted hook %s for release %s due to %q policy", hc.id, h.Hook.Name, hc.id, policy)
return nil
package app
import (
+ "context"
"encoding/base64"
- "github.com/onap/multicloud-k8s/src/k8splugin/internal/utils"
+ "io/ioutil"
+ "testing"
+
"github.com/onap/multicloud-k8s/src/k8splugin/internal/connection"
- "github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/db"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/utils"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/time"
- "io/ioutil"
"k8s.io/apimachinery/pkg/runtime/schema"
- "testing"
)
func generateHookList() []*helm.Hook {
var hookList []*helm.Hook
preInstallHook1 := helm.Hook{
Hook: release.Hook{
- Name : "preinstall1",
- Kind : "Job",
- Path : "",
- Manifest : "",
- Events : []release.HookEvent{release.HookPreInstall},
- LastRun : release.HookExecution{
+ Name: "preinstall1",
+ Kind: "Job",
+ Path: "",
+ Manifest: "",
+ Events: []release.HookEvent{release.HookPreInstall},
+ LastRun: release.HookExecution{
StartedAt: time.Now(),
CompletedAt: time.Now(),
Phase: "",
},
- Weight : -5,
- DeletePolicies : []release.HookDeletePolicy{},
+ Weight: -5,
+ DeletePolicies: []release.HookDeletePolicy{},
},
- KRT: helm.KubernetesResourceTemplate{
+ KRT: helm.KubernetesResourceTemplate{
GVK: schema.GroupVersionKind{
Group: "batch",
Version: "v1",
}
preInstallHook2 := helm.Hook{
Hook: release.Hook{
- Name : "preinstall2",
- Kind : "Deployment",
- Path : "",
- Manifest : "",
- Events : []release.HookEvent{release.HookPreInstall},
- LastRun : release.HookExecution{
+ Name: "preinstall2",
+ Kind: "Deployment",
+ Path: "",
+ Manifest: "",
+ Events: []release.HookEvent{release.HookPreInstall},
+ LastRun: release.HookExecution{
StartedAt: time.Now(),
CompletedAt: time.Now(),
Phase: "",
},
- Weight : 0,
- DeletePolicies : []release.HookDeletePolicy{},
+ Weight: 0,
+ DeletePolicies: []release.HookDeletePolicy{},
},
- KRT: helm.KubernetesResourceTemplate{
+ KRT: helm.KubernetesResourceTemplate{
GVK: schema.GroupVersionKind{
Group: "batch",
Version: "v1",
}
postInstallHook := helm.Hook{
Hook: release.Hook{
- Name : "postinstall",
- Kind : "Job",
- Path : "",
- Manifest : "",
- Events : []release.HookEvent{release.HookPostInstall},
- LastRun : release.HookExecution{
+ Name: "postinstall",
+ Kind: "Job",
+ Path: "",
+ Manifest: "",
+ Events: []release.HookEvent{release.HookPostInstall},
+ LastRun: release.HookExecution{
StartedAt: time.Now(),
CompletedAt: time.Now(),
Phase: "",
},
- Weight : -5,
- DeletePolicies : []release.HookDeletePolicy{},
+ Weight: -5,
+ DeletePolicies: []release.HookDeletePolicy{},
},
- KRT: helm.KubernetesResourceTemplate{
+ KRT: helm.KubernetesResourceTemplate{
GVK: schema.GroupVersionKind{
Group: "batch",
Version: "v1",
}
preDeleteHook := helm.Hook{
Hook: release.Hook{
- Name : "predelete",
- Kind : "Job",
- Path : "",
- Manifest : "",
- Events : []release.HookEvent{release.HookPreDelete},
- LastRun : release.HookExecution{
+ Name: "predelete",
+ Kind: "Job",
+ Path: "",
+ Manifest: "",
+ Events: []release.HookEvent{release.HookPreDelete},
+ LastRun: release.HookExecution{
StartedAt: time.Now(),
CompletedAt: time.Now(),
Phase: "",
},
- Weight : -5,
- DeletePolicies : []release.HookDeletePolicy{},
+ Weight: -5,
+ DeletePolicies: []release.HookDeletePolicy{},
},
- KRT: helm.KubernetesResourceTemplate{
+ KRT: helm.KubernetesResourceTemplate{
GVK: schema.GroupVersionKind{
Group: "batch",
Version: "v1",
}
postDeleteHook := helm.Hook{
Hook: release.Hook{
- Name : "postdelete",
- Kind : "Job",
- Path : "",
- Manifest : "",
- Events : []release.HookEvent{release.HookPostDelete},
- LastRun : release.HookExecution{
+ Name: "postdelete",
+ Kind: "Job",
+ Path: "",
+ Manifest: "",
+ Events: []release.HookEvent{release.HookPostDelete},
+ LastRun: release.HookExecution{
StartedAt: time.Now(),
CompletedAt: time.Now(),
Phase: "",
},
- Weight : -5,
- DeletePolicies : []release.HookDeletePolicy{},
+ Weight: -5,
+ DeletePolicies: []release.HookDeletePolicy{},
},
- KRT: helm.KubernetesResourceTemplate{
+ KRT: helm.KubernetesResourceTemplate{
GVK: schema.GroupVersionKind{
Group: "batch",
Version: "v1",
}
k8sClient := KubernetesClient{}
- err = k8sClient.Init("mock_connection", "test")
+ err = k8sClient.Init(context.TODO(), "mock_connection", "test")
if err != nil {
t.Fatal(err.Error())
}
- err = hookClient.ExecHook(k8sClient, hookList, release.HookPreInstall,10,0, nil)
+ err = hookClient.ExecHook(k8sClient, hookList, release.HookPreInstall, 10, 0, nil)
if err != nil {
t.Fatal(err.Error())
}
- err = hookClient.ExecHook(k8sClient, hookList, release.HookPostInstall,10,0, nil)
+ err = hookClient.ExecHook(k8sClient, hookList, release.HookPostInstall, 10, 0, nil)
if err != nil {
t.Fatal(err.Error())
}
- err = hookClient.ExecHook(k8sClient, hookList, release.HookPreDelete,10,0, nil)
+ err = hookClient.ExecHook(k8sClient, hookList, release.HookPreDelete, 10, 0, nil)
if err != nil {
t.Fatal(err.Error())
}
- err = hookClient.ExecHook(k8sClient, hookList, release.HookPostDelete,10,0, nil)
+ err = hookClient.ExecHook(k8sClient, hookList, release.HookPostDelete, 10, 0, nil)
if err != nil {
t.Fatal(err.Error())
}
-}
\ No newline at end of file
+}
"strings"
"time"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
ID string `json:"id"`
}
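+// tracer is the OpenTelemetry tracer used to create spans for operations in this package.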
+var tracer = otel.Tracer("internal/app")
+
// We will use json marshalling to convert to string to
// preserve the underlying structure.
func (dk InstanceKey) String() string {
}
k8sClient := KubernetesClient{}
- err = k8sClient.Init(i.CloudRegion, finalId)
+ err = k8sClient.Init(ctx, i.CloudRegion, finalId)
if err != nil {
namegenerator.Release(generatedId)
return InstanceResponse{}, pkgerrors.Wrap(err, "Getting CloudRegion Information")
PostUpgradeTimeout: hookTimeoutInfo.postUpgradeTimeout,
}
- err = k8sClient.ensureNamespace(profile.Namespace)
+ err = k8sClient.ensureNamespace(ctx, profile.Namespace)
if err != nil {
namegenerator.Release(generatedId)
return InstanceResponse{}, pkgerrors.Wrap(err, "Creating Namespace")
key := InstanceKey{
ID: finalId,
}
- err = db.DBconn.Create(context.TODO(), v.storeName, key, v.tagInst, dbData)
+ err = db.DBconn.Create(ctx, v.storeName, key, v.tagInst, dbData)
if err != nil {
namegenerator.Release(generatedId)
return InstanceResponse{}, pkgerrors.Wrap(err, "Creating Instance DB Entry")
if len(crdList) > 0 {
log.Printf("Pre-Installing CRDs")
- _, err = k8sClient.createResources(crdList, profile.Namespace)
+ _, err = k8sClient.createResources(ctx, crdList, profile.Namespace)
if err != nil {
return InstanceResponse{}, pkgerrors.Wrap(err, "Pre-Installing CRDs")
err = hookClient.ExecHook(k8sClient, hookList, release.HookPreInstall, hookTimeoutInfo.preInstallTimeOut, 0, &dbData)
if err != nil {
log.Printf("Error running preinstall hooks for release %s, Error: %s. Stop here", releaseName, err)
- err2 := db.DBconn.Delete(context.TODO(), v.storeName, key, v.tagInst)
+ err2 := db.DBconn.Delete(ctx, v.storeName, key, v.tagInst)
if err2 != nil {
log.Printf("Error cleaning failed instance in DB, please check DB.")
} else {
}
dbData.Status = "CREATING"
- err = db.DBconn.Update(context.TODO(), v.storeName, key, v.tagInst, dbData)
+ err = db.DBconn.Update(ctx, v.storeName, key, v.tagInst, dbData)
if err != nil {
- err2 := db.DBconn.Delete(context.TODO(), v.storeName, key, v.tagInst)
+ err2 := db.DBconn.Delete(ctx, v.storeName, key, v.tagInst)
if err2 != nil {
log.Printf("Delete Instance DB Entry for release %s has error.", releaseName)
} else {
}
//Main rss creation is supposed to be very quick -> no need to support recover for main rss
- createdResources, err := k8sClient.createResources(sortedTemplates, profile.Namespace)
+ createdResources, err := k8sClient.createResources(ctx, sortedTemplates, profile.Namespace)
if err != nil {
if len(createdResources) > 0 {
log.Printf("[Instance] Reverting created resources on Error: %s", err.Error())
- k8sClient.deleteResources(helm.GetReverseK8sResources(createdResources), profile.Namespace)
+ k8sClient.deleteResources(ctx, helm.GetReverseK8sResources(createdResources), profile.Namespace)
}
log.Printf(" Instance: %s, Main rss are failed, skip post-install and remove instance in DB", finalId)
//main rss creation failed -> remove instance in DB
- err2 := db.DBconn.Delete(context.TODO(), v.storeName, key, v.tagInst)
+ err2 := db.DBconn.Delete(ctx, v.storeName, key, v.tagInst)
if err2 != nil {
log.Printf("Delete Instance DB Entry for release %s has error.", releaseName)
} else {
dbData.Status = "CREATED"
dbData.Resources = createdResources
- err = db.DBconn.Update(context.TODO(), v.storeName, key, v.tagInst, dbData)
+ err = db.DBconn.Update(ctx, v.storeName, key, v.tagInst, dbData)
if err != nil {
return InstanceResponse{}, pkgerrors.Wrap(err, "Update Instance DB Entry")
}
} else {
dbData.Status = "DONE"
}
- err = db.DBconn.Update(context.TODO(), v.storeName, key, v.tagInst, dbData)
+ err = db.DBconn.Update(ctx, v.storeName, key, v.tagInst, dbData)
if err != nil {
log.Printf("Update Instance DB Entry for release %s has error.", releaseName)
}
}()
} else {
dbData.Status = "DONE"
- err = db.DBconn.Update(context.TODO(), v.storeName, key, v.tagInst, dbData)
+ err = db.DBconn.Update(ctx, v.storeName, key, v.tagInst, dbData)
if err != nil {
log.Printf("Update Instance DB Entry for release %s has error.", releaseName)
}
}
k8sClient := KubernetesClient{}
- err = k8sClient.Init(i.CloudRegion, id)
+ err = k8sClient.Init(ctx, i.CloudRegion, id)
if err != nil {
return InstanceResponse{}, pkgerrors.Wrap(err, "Getting CloudRegion Information")
}
PostUpgradeTimeout: hookTimeoutInfo.postUpgradeTimeout,
}
- err = k8sClient.ensureNamespace(profile.Namespace)
+ err = k8sClient.ensureNamespace(ctx, profile.Namespace)
if err != nil {
return InstanceResponse{}, pkgerrors.Wrap(err, "Creating Namespace")
}
if len(crdList) > 0 {
log.Printf("Pre-Installing CRDs")
- _, err = k8sClient.createResources(crdList, profile.Namespace)
+ _, err = k8sClient.createResources(ctx, crdList, profile.Namespace)
if err != nil {
return InstanceResponse{}, pkgerrors.Wrap(err, "Pre-Installing CRDs")
return InstanceResponse{}, pkgerrors.Wrap(err, "Update Instance DB Entry")
}
- upgradedResources, err := k8sClient.updateResources(sortedTemplates, profile.Namespace, true)
+ upgradedResources, err := k8sClient.updateResources(ctx, sortedTemplates, profile.Namespace, true)
if err != nil {
log.Printf(" Instance: %s, Main rss are failed, skip post-upgrade", id)
return InstanceResponse{}, pkgerrors.Wrap(err, "Upgrade Kubernetes Resources")
exists = true
break
} else {
- status1, err := k8sClient.GetResourceStatus(res, profile.Namespace)
- status2, err2 := k8sClient.GetResourceStatus(pastRes, currentInstance.Namespace)
+ status1, err := k8sClient.GetResourceStatus(ctx, res, profile.Namespace)
+ status2, err2 := k8sClient.GetResourceStatus(ctx, pastRes, currentInstance.Namespace)
if err == nil && err2 == nil && status1.Value() == status2.Value() {
//only when resource is namespace-less
exists = true
}
}
- err = k8sClient.deleteResources(helm.GetReverseK8sResources(resToDelete), currentInstance.Namespace)
+ err = k8sClient.deleteResources(ctx, helm.GetReverseK8sResources(resToDelete), currentInstance.Namespace)
configClient := NewConfigClient()
configList, err := configClient.List(id)
// Status returns the status for the instance
func (v *InstanceClient) Status(ctx context.Context, id string, checkReady bool) (InstanceStatus, error) {
+ ctx, statusSpan := tracer.Start(ctx, "InstanceClient.Status")
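+	// Annotate the span with the standard code.function and code.namespace attributes for this method.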
+ statusSpan.SetAttributes(
+ attribute.String(string(semconv.CodeFunctionKey), "Status"),
+ attribute.String(string(semconv.CodeNamespaceKey), "k8splugin/internal/app.InstanceClient"),
+ )
+ defer statusSpan.End()
//Read the status from the DB
key := InstanceKey{
ID: id,
}
k8sClient := KubernetesClient{}
- err = k8sClient.Init(resResp.Request.CloudRegion, id)
+ err = k8sClient.Init(ctx, resResp.Request.CloudRegion, id)
if err != nil {
return InstanceStatus{}, pkgerrors.Wrap(err, "Getting CloudRegion Information")
}
}
cumulatedErrorMsg := make([]string, 0)
- podsStatus, err := k8sClient.getPodsByLabel(resResp.Namespace)
+ podsStatus, err := k8sClient.getPodsByLabel(ctx, resResp.Namespace)
if err != nil {
cumulatedErrorMsg = append(cumulatedErrorMsg, err.Error())
}
continue Main //Don't double check pods if someone decided to define pod explicitly in helm chart
}
}
- status, err := k8sClient.GetResourceStatus(oneResource, resResp.Namespace)
+ status, err := k8sClient.GetResourceStatus(ctx, oneResource, resResp.Namespace)
if err != nil {
cumulatedErrorMsg = append(cumulatedErrorMsg, err.Error())
isReady = false
}
k8sClient := KubernetesClient{}
- err = k8sClient.Init(inst.Request.CloudRegion, inst.ID)
+ err = k8sClient.Init(ctx, inst.Request.CloudRegion, inst.ID)
if err != nil {
return pkgerrors.Wrap(err, "Getting CloudRegion Information")
}
return pkgerrors.Wrap(err, "Cleanup Config Resources")
}
- err = k8sClient.deleteResources(helm.GetReverseK8sResources(inst.Resources), inst.Namespace)
+ err = k8sClient.deleteResources(ctx, helm.GetReverseK8sResources(inst.Resources), inst.Namespace)
if err != nil {
return pkgerrors.Wrap(err, "Deleting Instance Resources")
}
return nil
}
k8sClient := KubernetesClient{}
- err = k8sClient.Init(instance.Request.CloudRegion, id)
+ err = k8sClient.Init(ctx, instance.Request.CloudRegion, id)
if err != nil {
log.Printf(" Error getting CloudRegion %s", instance.Request.CloudRegion)
return nil
return
}
- err = k8sClient.deleteResources(helm.GetReverseK8sResources(instance.Resources), instance.Namespace)
+ err = k8sClient.deleteResources(ctx, helm.GetReverseK8sResources(instance.Resources), instance.Namespace)
if err != nil {
log.Printf(" Error running deleting instance resources, error: %s", err)
return
GVK: h.KRT.GVK,
Name: h.Hook.Name,
}
- if _, err := k8sClient.GetResourceStatus(res, hookClient.kubeNameSpace); err == nil {
+ if _, err := k8sClient.GetResourceStatus(context.TODO(), res, hookClient.kubeNameSpace); err == nil {
remainHookRss = append(remainHookRss, res)
log.Printf(" Rss %s will be deleted.", res.Name)
}
}
if len(remainHookRss) > 0 {
- err = k8sClient.deleteResources(remainHookRss, hookClient.kubeNameSpace)
+ err = k8sClient.deleteResources(context.TODO(), remainHookRss, hookClient.kubeNameSpace)
if err != nil {
log.Printf("Error cleaning Hook Rss, please do it manually if needed. Error: %s", err.Error())
}
package app
import (
+ "context"
+
pkgerrors "github.com/pkg/errors"
)
- //Read the status from the DD
+ //Read the status from the DB
k8sClient := KubernetesClient{}
- err := k8sClient.Init(cloudRegion, "dummy") //we don't care about instance id in this request
+ err := k8sClient.Init(context.TODO(), cloudRegion, "dummy") //we don't care about instance id in this request
if err != nil {
return QueryStatus{}, pkgerrors.Wrap(err, "Getting CloudRegion Information")
}
var resourcesStatus []ResourceStatus
if name != "" {
- resList, err := k8sClient.queryResources(apiVersion, kind, labels, namespace)
+ resList, err := k8sClient.queryResources(context.TODO(), apiVersion, kind, labels, namespace)
if err != nil {
return QueryStatus{}, pkgerrors.Wrap(err, "Querying Resources")
}
resourcesStatus = resList
}
} else {
- resList, err := k8sClient.queryResources(apiVersion, kind, labels, namespace)
+ resList, err := k8sClient.queryResources(context.TODO(), apiVersion, kind, labels, namespace)
if err != nil {
return QueryStatus{}, pkgerrors.Wrap(err, "Querying Resources")
}
})
v := NewInstanceClient()
k8sClient := KubernetesClient{}
- instance, err := v.Get(context.TODO(), instanceId)
+ ctx := context.TODO()
+ instance, err := v.Get(ctx, instanceId)
if err != nil {
return pkgerrors.Wrap(err, "Cannot get instance for notify thread")
}
if err != nil {
return pkgerrors.Wrap(err, "Unable to find Profile instance status")
}
- err = k8sClient.Init(instance.Request.CloudRegion, instanceId)
+ err = k8sClient.Init(ctx, instance.Request.CloudRegion, instanceId)
if err != nil {
return pkgerrors.Wrap(err, "Cannot set k8s client for instance")
}
//Determine Cloud Region and namespace
v := app.NewInstanceClient()
- instance, err := v.Get(context.TODO(), instanceId)
+ ctx := context.TODO()
+ instance, err := v.Get(ctx, instanceId)
if err != nil {
return InstanceMiniHCStatus{}, pkgerrors.Wrap(err, "Getting instance")
}
k8sClient := app.KubernetesClient{}
- err = k8sClient.Init(instance.Request.CloudRegion, instanceId)
+ err = k8sClient.Init(ctx, instance.Request.CloudRegion, instanceId)
if err != nil {
return InstanceMiniHCStatus{}, pkgerrors.Wrap(err, "Preparing KubeClient")
}
for _, h := range hooks {
h.Status.StartedAt = time.Now()
- kr, err := k8sClient.CreateKind(h.Definition.KRT, instance.Namespace)
+ kr, err := k8sClient.CreateKind(ctx, h.Definition.KRT, instance.Namespace)
if err != nil {
// Set status fields
h.Status.Status = release.HookPhaseFailed
retErr := "Starting hook " + h.Status.Name
// Dump to DB
- err = db.DBconn.Create(context.TODO(), ihc.storeName, key, ihc.tagInst, ihcs)
+ err = db.DBconn.Create(ctx, ihc.storeName, key, ihc.tagInst, ihcs)
if err != nil {
retErr = retErr + " and couldn't save to DB"
}
h.Status.KR = kr
}
}
- err = db.DBconn.Create(context.TODO(), ihc.storeName, key, ihc.tagInst, ihcs)
+ err = db.DBconn.Create(ctx, ihc.storeName, key, ihc.tagInst, ihcs)
if err != nil {
return instanceMiniHCStatusFromStatus(ihcs),
pkgerrors.Wrap(err, "Creating Instance DB Entry")
"Reason": map[bool]string{true: "Hook finished", false: "All hooks finished"}[b],
})
if b { //Some hook finished - need to update DB
- err = db.DBconn.Update(context.TODO(), ihc.storeName, key, ihc.tagInst, ihcs)
+ err = db.DBconn.Update(ctx, ihc.storeName, key, ihc.tagInst, ihcs)
if err != nil {
log.Error("Couldn't update database", log.Fields{
"Store": ihc.storeName,
}
}
ihcs.Status = finalResult
- err = db.DBconn.Update(context.TODO(), ihc.storeName, key, ihc.tagInst, ihcs)
+ err = db.DBconn.Update(ctx, ihc.storeName, key, ihc.tagInst, ihcs)
if err != nil {
log.Error("Couldn't update database", log.Fields{
"Store": ihc.storeName,
return pkgerrors.Wrap(err, "Error querying Healthcheck status")
}
k8sClient := app.KubernetesClient{}
- err = k8sClient.Init(instance.Request.CloudRegion, instanceId)
+ err = k8sClient.Init(context.TODO(), instance.Request.CloudRegion, instanceId)
if err != nil {
return pkgerrors.Wrap(err, "Preparing KubeClient")
}
package healthcheck
import (
+ "context"
"time"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/app"
}
for {
- res, err := k8sClient.GetResourceStatus(hookStatus.KR, namespace)
+ res, err := k8sClient.GetResourceStatus(context.TODO(), hookStatus.KR, namespace)
if err != nil {
log.Error("Unable to check Resource Status", log.Fields{
"Resource": hookStatus.KR,