Better logging for backend components #3939

Merged · 13 commits · Apr 18, 2023
6 changes: 3 additions & 3 deletions litmus-portal/graphql-server/graph/analytics.resolvers.go
@@ -12,7 +12,7 @@ import (
analyticsOps "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/analytics/ops"
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/authorization"
data_store "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
)

func (r *mutationResolver) CreateDataSource(ctx context.Context, datasource *model.DSInput) (*model.DSResponse, error) {
@@ -166,13 +166,13 @@ func (r *queryResolver) ListPortalDashboardData(ctx context.Context, projectID s
func (r *subscriptionResolver) ViewDashboard(ctx context.Context, dashboardID *string, promQueries []*model.PromQueryInput, dashboardQueryMap []*model.QueryMapForPanelGroup, dataVariables model.DataVars) (<-chan *model.DashboardPromResponse, error) {
dashboardData := make(chan *model.DashboardPromResponse)
viewID := uuid.New()
logrus.Printf("Dashboard view %v created\n", viewID.String())
log.Infof("Dashboard view %v created\n", viewID.String())
data_store.Store.Mutex.Lock()
data_store.Store.DashboardData[viewID.String()] = dashboardData
data_store.Store.Mutex.Unlock()
go func() {
<-ctx.Done()
logrus.Printf("Closed dashboard view %v\n", viewID.String())
log.Infof("Closed dashboard view %v\n", viewID.String())
if _, ok := data_store.Store.DashboardData[viewID.String()]; ok {
analyticsOps.UpdateViewedAt(dashboardID, viewID.String())

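The convention adopted across these resolvers is to import logrus under the `log` alias and to replace unleveled `logrus.Print*` calls with leveled ones: `Info` for lifecycle events, `Error` for failures. A minimal sketch of that convention follows; the identifiers and messages are hypothetical, not lines taken from any one file in the PR:

```go
package main

import (
	"errors"

	// logrus imported under the alias used throughout the PR
	log "github.com/sirupsen/logrus"
)

// verify stands in for a call such as cluster.VerifyCluster.
func verify() error {
	return errors.New("cluster not registered")
}

func main() {
	viewID := "view-123" // hypothetical identifier

	// lifecycle / informational events use Info
	log.Infof("Dashboard view %v created", viewID)

	// failures that the caller will still handle use Error
	if err := verify(); err != nil {
		log.Error("VALIDATION FAILED: ", err)
	}
}
```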
18 changes: 9 additions & 9 deletions litmus-portal/graphql-server/graph/cluster.resolvers.go
@@ -18,7 +18,7 @@ import (
clusterHandler "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/cluster/handler"
data_store "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store"
dbOperationsCluster "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/cluster"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"go.mongodb.org/mongo-driver/bson"
)

@@ -103,7 +103,7 @@ func (r *queryResolver) GetManifest(ctx context.Context, projectID string, clust
}

func (r *subscriptionResolver) GetClusterEvents(ctx context.Context, projectID string) (<-chan *model.ClusterEventResponse, error) {
logrus.Print("NEW EVENT ", projectID)
log.Info("NEW EVENT ", projectID)
clusterEvent := make(chan *model.ClusterEventResponse, 1)

data_store.Store.Mutex.Lock()
@@ -118,11 +118,11 @@ }
}

func (r *subscriptionResolver) ClusterConnect(ctx context.Context, clusterInfo model.ClusterIdentity) (<-chan *model.ClusterActionResponse, error) {
logrus.Print("NEW CLUSTER CONNECT: ", clusterInfo.ClusterID)
log.Info("NEW CLUSTER CONNECT: ", clusterInfo.ClusterID)
clusterAction := make(chan *model.ClusterActionResponse, 1)
verifiedCluster, err := cluster.VerifyCluster(clusterInfo)
if err != nil {
logrus.Print("VALIDATION FAILED: ", clusterInfo.ClusterID)
log.Error("VALIDATION FAILED: ", clusterInfo.ClusterID)
return clusterAction, err
}
data_store.Store.Mutex.Lock()
@@ -149,7 +149,7 @@ func (r *subscriptionResolver) ClusterConnect(ctx context.Context, clusterInfo m

err = dbOperationsCluster.UpdateCluster(query, update)
if err != nil {
logrus.Print("Error", err)
log.Error("Error", err)
}
}()

@@ -170,31 +170,31 @@ func (r *subscriptionResolver) ClusterConnect(ctx context.Context, clusterInfo m
}

func (r *subscriptionResolver) GetPodLog(ctx context.Context, request model.PodLogRequest) (<-chan *model.PodLogResponse, error) {
logrus.Print("NEW LOG REQUEST: ", request.ClusterID, request.PodName)
log.Info("NEW LOG REQUEST: ", request.ClusterID, request.PodName)
workflowLog := make(chan *model.PodLogResponse, 1)
reqID := uuid.New()
data_store.Store.Mutex.Lock()
data_store.Store.WorkflowLog[reqID.String()] = workflowLog
data_store.Store.Mutex.Unlock()
go func() {
<-ctx.Done()
logrus.Print("CLOSED LOG LISTENER: ", request.ClusterID, request.PodName)
log.Info("CLOSED LOG LISTENER: ", request.ClusterID, request.PodName)
delete(data_store.Store.WorkflowLog, reqID.String())
}()
go wfHandler.GetLogs(reqID.String(), request, *data_store.Store)
return workflowLog, nil
}

func (r *subscriptionResolver) GetKubeObject(ctx context.Context, request model.KubeObjectRequest) (<-chan *model.KubeObjectResponse, error) {
logrus.Print("NEW KUBEOBJECT REQUEST", request.ClusterID)
log.Info("NEW KUBEOBJECT REQUEST", request.ClusterID)
kubeObjData := make(chan *model.KubeObjectResponse)
reqID := uuid.New()
data_store.Store.Mutex.Lock()
data_store.Store.KubeObjectData[reqID.String()] = kubeObjData
data_store.Store.Mutex.Unlock()
go func() {
<-ctx.Done()
logrus.Println("Closed KubeObj Listener")
log.Info("Closed KubeObj Listener")
delete(data_store.Store.KubeObjectData, reqID.String())
}()
go wfHandler.GetKubeObjData(reqID.String(), request, *data_store.Store)
12 changes: 6 additions & 6 deletions litmus-portal/graphql-server/graph/image_registry.resolvers.go
@@ -9,7 +9,7 @@ import (
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/graph/model"
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/authorization"
imageRegistryOps "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/image_registry/ops"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
)

func (r *mutationResolver) CreateImageRegistry(ctx context.Context, projectID string, imageRegistryInfo model.ImageRegistryInput) (*model.ImageRegistryResponse, error) {
@@ -22,7 +22,7 @@ func (r *mutationResolver) CreateImageRegistry(ctx context.Context, projectID st

ciResponse, err := imageRegistryOps.CreateImageRegistry(ctx, projectID, imageRegistryInfo)
if err != nil {
logrus.Error(err)
log.Error(err)
Member:
Can you convert all `if err != nil` condition logs to log.Fatal?

Member Author:
Hey @S-ayanide, log.Fatal calls os.Exit(1), so the process will exit with status 1. Is that okay?
https://pkg.go.dev/github.com/sirupsen/logrus#Fatal

Member:
Yes, that is the expectation: if something in the middle goes wrong, we'd want to terminate that function with a Fatal message.
@amityt, correct me if I'm wrong.

Contributor:
No, log.Fatal is not required here; it would impact the complete server. Rather, a return statement can be added in this error block.

Member Author:
If we use log.Fatal() to exit the process, the Kubernetes Deployment object restarts the pod automatically, which IMO is an unnecessary restart. Moreover, all of the logs are lost, so we cannot analyze what the problem was.

Contributor:
Yes, log.Fatal() should not be used here. 👍🏼
}
return ciResponse, err
}
@@ -37,7 +37,7 @@ func (r *mutationResolver) UpdateImageRegistry(ctx context.Context, imageRegistr

uiRegistry, err := imageRegistryOps.UpdateImageRegistry(ctx, imageRegistryID, projectID, imageRegistryInfo)
if err != nil {
logrus.Error(err)
log.Error(err)
}

return uiRegistry, err
@@ -53,7 +53,7 @@ func (r *mutationResolver) DeleteImageRegistry(ctx context.Context, imageRegistr

diRegistry, err := imageRegistryOps.DeleteImageRegistry(ctx, imageRegistryID, projectID)
if err != nil {
logrus.Error(err)
log.Error(err)
}

return diRegistry, err
@@ -69,7 +69,7 @@ func (r *queryResolver) ListImageRegistry(ctx context.Context, projectID string)

imageRegistries, err := imageRegistryOps.ListImageRegistries(ctx, projectID)
if err != nil {
logrus.Error(err)
log.Error(err)
}

return imageRegistries, err
@@ -85,7 +85,7 @@ func (r *queryResolver) GetImageRegistry(ctx context.Context, imageRegistryID st

imageRegistry, err := imageRegistryOps.GetImageRegistry(ctx, imageRegistryID, projectID)
if err != nil {
logrus.Error(err)
log.Error(err)
}

return imageRegistry, err
8 changes: 4 additions & 4 deletions litmus-portal/graphql-server/graph/workflow.resolvers.go
@@ -11,7 +11,7 @@ import (
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/authorization"
wfHandler "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/chaos-workflow/handler"
data_store "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
)

func (r *mutationResolver) CreateChaosWorkFlow(ctx context.Context, request model.ChaosWorkFlowRequest) (*model.ChaosWorkFlowResponse, error) {
@@ -36,7 +36,7 @@ func (r *mutationResolver) ReRunChaosWorkFlow(ctx context.Context, projectID str
username, err := authorization.GetUsername(tkn)

if err != nil {
logrus.Print("Error getting username: ", err)
log.Error("Error getting username: ", err)
return "", err
}

@@ -113,14 +113,14 @@ func (r *queryResolver) ListWorkflowRuns(ctx context.Context, request model.List
}

func (r *subscriptionResolver) GetWorkflowEvents(ctx context.Context, projectID string) (<-chan *model.WorkflowRun, error) {
logrus.Print("NEW WORKFLOW EVENT LISTENER: ", projectID)
log.Info("NEW WORKFLOW EVENT LISTENER: ", projectID)
workflowEvent := make(chan *model.WorkflowRun, 1)
data_store.Store.Mutex.Lock()
data_store.Store.WorkflowEventPublish[projectID] = append(data_store.Store.WorkflowEventPublish[projectID], workflowEvent)
data_store.Store.Mutex.Unlock()
go func() {
<-ctx.Done()
logrus.Print("CLOSED WORKFLOW LISTENER: ", projectID)
log.Info("CLOSED WORKFLOW LISTENER: ", projectID)
}()
return workflowEvent, nil
}
28 changes: 14 additions & 14 deletions litmus-portal/graphql-server/pkg/analytics/handler/handler.go
@@ -6,14 +6,14 @@ import (
"errors"
"fmt"
"io/ioutil"
"log"
"sort"
"strconv"
"strings"
"sync"
"time"

store "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/data-store"
log "github.com/sirupsen/logrus"
"go.mongodb.org/mongo-driver/mongo"

"github.com/google/uuid"
@@ -162,15 +162,15 @@ func CreateDashboard(dashboard *model.CreateDBInput) (*model.ListDashboardRespon
if err != nil {
return nil, fmt.Errorf("error on inserting panel data", err)
}
log.Print("sucessfully inserted prom query into promquery-collection")
log.Info("sucessfully inserted prom query into promquery-collection")

newDashboard.PanelGroups = newPanelGroups

err = dbOperationsAnalytics.InsertDashBoard(newDashboard)
if err != nil {
return nil, fmt.Errorf("error on inserting panel data", err)
}
log.Print("sucessfully inserted dashboard into dashboard-collection")
log.Info("sucessfully inserted dashboard into dashboard-collection")

var newDBResponse = model.ListDashboardResponse{}
_ = copier.Copy(&newDBResponse, &newDashboard)
@@ -405,7 +405,7 @@ func UpdateDashBoard(projectID string, dashboard model.UpdateDBInput, chaosQuery
if err != nil {
return "error creating new panels", fmt.Errorf("error while inserting panel data", err)
}
log.Print("successfully inserted prom query into promquery-collection")
log.Info("successfully inserted prom query into promquery-collection")
}

if len(panelsToUpdate) > 0 {
@@ -750,7 +750,7 @@ func GetPrometheusData(promInput *model.PrometheusDataRequest) (*model.Prometheu
if strings.Contains(errorStr, "already exists") {
cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
if cacheError != nil {
log.Printf("Error while caching: %v\n", cacheError)
log.Errorf("Error while caching: %v\n", cacheError)
}
}
}
Expand All @@ -763,7 +763,7 @@ func GetPrometheusData(promInput *model.PrometheusDataRequest) (*model.Prometheu
if strings.Contains(errorStr, "already exists") {
cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
if cacheError != nil {
log.Printf("Error while caching: %v\n", cacheError)
log.Errorf("Error while caching: %v\n", cacheError)
}
}
}
@@ -827,7 +827,7 @@ func DashboardViewer(viewID string, dashboardID *string, promQueries []*model.Pr

newPromResponse, queryResponseMap, err := GetPrometheusData(newPromInput)
if err != nil {
log.Printf("Error during data source query of the dashboard view: %v\n", viewID)
log.Errorf("Error during data source query of the dashboard view: %v\n", viewID)
} else {
dashboardResponse := ops.MapMetricsToDashboard(dashboardQueryMap, newPromResponse, queryResponseMap)
viewChan <- dashboardResponse
@@ -848,7 +848,7 @@ func DashboardViewer(viewID string, dashboardID *string, promQueries []*model.Pr

newPromResponse, queryResponseMap, err := GetPrometheusData(newPromInput)
if err != nil {
log.Printf("Error during data source query of the dashboard view: %v at: %v \n", viewID, currentTime)
log.Errorf("Error during data source query of the dashboard view: %v at: %v \n", viewID, currentTime)
break
} else {
dashboardResponse := ops.MapMetricsToDashboard(dashboardQueryMap, newPromResponse, queryResponseMap)
@@ -878,14 +878,14 @@ func DashboardViewer(viewID string, dashboardID *string, promQueries []*model.Pr

newPromResponse, queryResponseMap, err := GetPrometheusData(newPromInput)
if err != nil {
log.Printf("Error during data source query of the dashboard view: %v at: %v \n", viewID, currentTime)
log.Errorf("Error during data source query of the dashboard view: %v at: %v \n", viewID, currentTime)
} else {
dashboardResponse := ops.MapMetricsToDashboard(dashboardQueryMap, newPromResponse, queryResponseMap)
viewChan <- dashboardResponse
}

case "invalid":
log.Printf("Wrong parameters for the dashboard view: %v\n", viewID)
log.Errorf("Wrong parameters for the dashboard view: %v\n", viewID)
}

ops.UpdateViewedAt(dashboardID, viewID)
@@ -919,7 +919,7 @@ func GetLabelNamesAndValues(promSeriesInput *model.PromSeriesInput) (*model.Prom
if strings.Contains(errorStr, "already exists") {
cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
if cacheError != nil {
log.Printf("Error while caching: %v\n", cacheError)
log.Errorf("Error while caching: %v\n", cacheError)
}
}
}
@@ -953,7 +953,7 @@ func GetPromSeriesList(promSeriesListInput *model.DsDetails) (*model.PromSeriesL
if strings.Contains(errorStr, "already exists") {
cacheError = utils.UpdateCache(AnalyticsCache, cacheKey, response)
if cacheError != nil {
log.Printf("Error while caching: %v\n", cacheError)
log.Errorf("Error while caching: %v\n", cacheError)
}
}
}
@@ -1615,7 +1615,7 @@ func ListHeatmapData(workflow_id string, project_id string, year int) ([]*model.
// Result array
result := make([]*model.HeatmapDataResponse, 0, noOfDays)
if err = workflowsCursor.All(context.Background(), &chaosWorkflows); err != nil {
fmt.Println(err)
log.Error(err)
return result, nil
}

@@ -1629,7 +1629,7 @@ func ListHeatmapData(workflow_id string, project_id string, year int) ([]*model.
for _, workflowRun := range WorkflowRuns {
i, err := strconv.ParseInt(workflowRun.LastUpdated, 10, 64)
if err != nil {
fmt.Println("error", err)
log.Error("error", err)
}
lastUpdated := time.Unix(i, 0)
date := float64(lastUpdated.Unix())
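In handler.go above (and similarly in operations.go below) the mechanism is slightly different: the standard-library `"log"` import is removed and logrus is imported under the same `log` identifier, so existing call sites keep compiling while `Printf`-style calls are upgraded to leveled ones. A hedged sketch of the resulting shape, using a hypothetical caching helper rather than the PR's actual functions:

```go
package main

import (
	// stdlib "log" removed; logrus takes over the same identifier
	log "github.com/sirupsen/logrus"
)

// updateCache is a stand-in for utils.UpdateCache in the PR.
func updateCache(key string, value interface{}) error { return nil }

func cacheResult(key string, value interface{}) {
	if err := updateCache(key, value); err != nil {
		// what used to be log.Printf(...) is now a leveled call
		log.Errorf("Error while caching: %v", err)
	}
}

func main() {
	cacheResult("prom-query", 42)
}
```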
18 changes: 9 additions & 9 deletions litmus-portal/graphql-server/pkg/analytics/ops/operations.go
@@ -3,7 +3,6 @@ package ops
import (
"errors"
"fmt"
"log"
"strconv"
"strings"
"time"
@@ -13,6 +12,7 @@ import (
dbOperationsAnalytics "github.com/litmuschaos/litmus/litmus-portal/graphql-server/pkg/database/mongodb/analytics"
"github.com/litmuschaos/litmus/litmus-portal/graphql-server/utils"
"github.com/patrickmn/go-cache"
log "github.com/sirupsen/logrus"
"go.mongodb.org/mongo-driver/bson"
)

@@ -62,21 +62,21 @@ func PatchChaosEventWithVerdict(annotations []*model.AnnotationsPromResponse, ve
var existingAnnotations []*model.AnnotationsPromResponse
err := copier.Copy(&existingAnnotations, &annotations)
if err != nil {
log.Printf("Error parsing existing annotations %v\n", err)
log.Errorf("Error parsing existing annotations %v\n", err)
}

for annotationIndex, annotation := range existingAnnotations {
var existingAnnotation model.AnnotationsPromResponse
err := copier.Copy(&existingAnnotation, &annotation)
if err != nil {
log.Printf("Error parsing existing annotation %v\n", err)
log.Errorf("Error parsing existing annotation %v\n", err)
}

if strings.Contains(existingAnnotation.QueryID, "chaos-event") {
var newAnnotation model.AnnotationsPromResponse
err := copier.Copy(&newAnnotation, &verdictResponse)
if err != nil {
log.Printf("Error parsing new annotation %v\n", err)
log.Errorf("Error parsing new annotation %v\n", err)
}

duplicateEventIndices := make(map[int]int)
@@ -172,7 +172,7 @@ func PatchChaosEventWithVerdict(annotations []*model.AnnotationsPromResponse, ve
if strings.Contains(errorStr, "already exists") {
cacheError = utils.UpdateCache(AnalyticsCache, eventCacheKey, annotations[annotationIndex])
if cacheError != nil {
log.Printf("Error while caching: %v\n", cacheError)
log.Errorf("Error while caching: %v\n", cacheError)
}
}
}
@@ -209,7 +209,7 @@ func MapMetricsToDashboard(dashboardQueryMap []*model.QueryMapForPanelGroup, new
var promResponse model.PrometheusDataResponse
err := copier.Copy(&promResponse, &newPromResponse)
if err != nil {
log.Printf("Error parsing annotations %v\n", err)
log.Errorf("Error parsing annotations %v\n", err)
}
dashboardResponse := &model.DashboardPromResponse{
DashboardMetricsResponse: dashboardMetrics,
@@ -230,9 +230,9 @@ func UpdateViewedAt(dashboardID *string, viewID string) {
update := bson.D{{"$set", bson.D{{"viewed_at", timestamp}}}}
err := dbOperationsAnalytics.UpdateDashboard(query, update)
if err != nil {
log.Printf("error updating viewed_at field of the dashboard: %v\n", *dashboardID)
log.Errorf("error updating viewed_at field of the dashboard: %v\n", *dashboardID)
}
log.Printf("successfully updated viewed_at field of the dashboard: %v\n", *dashboardID)
log.Infof("successfully updated viewed_at field of the dashboard: %v\n", *dashboardID)
}
log.Printf("dashboard is not saved for the view: %v\n", viewID)
log.Errorf("dashboard is not saved for the view: %v\n", viewID)
}