This repository has been archived by the owner on Jun 26, 2023. It is now read-only.

[MTB] Sort benchmarks in scorecard #993

Merged
merged 6 commits on Aug 14, 2020
Changes from 2 commits
3 changes: 3 additions & 0 deletions benchmarks/kubectl-mtb/internal/kubectl-mtb/run.go
@@ -189,6 +189,7 @@ func runTests(cmd *cobra.Command, args []string) error {
suiteSummary.NumberOfFailedValidations++
ts.Validation = false
ts.ValidationError = err
b.Status = "Error"
}

// Check PreRun status
@@ -198,8 +199,10 @@ func runTests(cmd *cobra.Command, args []string) error {
suiteSummary.NumberOfFailedTests++
ts.Test = false
ts.TestError = err
b.Status = "Fail"
} else {
suiteSummary.NumberOfPassedTests++
b.Status = "Pass"
}
}

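For context, here is a minimal sketch of the pattern this change introduces on the runner side: each benchmark records its own outcome as a status string, so the reporter no longer needs a separate map keyed by benchmark pointer. The types and helper below are simplified stand-ins, not the actual kubectl-mtb definitions.

```go
package main

import "fmt"

// Simplified stand-in for benchmark.Benchmark; the real struct carries many
// more fields (Title, ProfileLevel, PreRun/Run funcs, ...).
type Benchmark struct {
	ID     string
	Status string // "Pass", "Fail", or "Error", mirroring the values set in run.go
}

// runOne mimics the control flow in runTests: a failed PreRun/validation marks
// the benchmark as "Error", a failed test as "Fail", and success as "Pass".
func runOne(b *Benchmark, preRun, run func() error) {
	if err := preRun(); err != nil {
		b.Status = "Error"
		return
	}
	if err := run(); err != nil {
		b.Status = "Fail"
		return
	}
	b.Status = "Pass"
}

func main() {
	b := &Benchmark{ID: "example-id"}
	runOne(b, func() error { return nil }, func() error { return nil })
	fmt.Println(b.ID, b.Status) // example-id Pass
}
```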
23 changes: 9 additions & 14 deletions benchmarks/kubectl-mtb/internal/reporter/default_reporter.go
@@ -5,18 +5,16 @@ import (
"os"
"strconv"

v1alpha1 "github.com/kubernetes-sigs/wg-policy-prototypes/policy-report/api/v1alpha1"
"github.com/olekukonko/tablewriter"
"sigs.k8s.io/multi-tenancy/benchmarks/kubectl-mtb/pkg/benchmark"
benchmarksuite "sigs.k8s.io/multi-tenancy/benchmarks/kubectl-mtb/pkg/benchmark_suite"
)

// DefaultReporter collects all the test summaries
type DefaultReporter struct {
testSummaries []*TestSummary
}

var testResult = map[*benchmark.Benchmark]v1alpha1.PolicyStatus{}

// NewDefaultReporter returns the pointer of DefaultReporter
func NewDefaultReporter() *DefaultReporter {
return &DefaultReporter{}
@@ -35,11 +33,9 @@ func (r *DefaultReporter) TestWillRun(testSummary *TestSummary) {
writer.Println(0, testSummary.Benchmark.Title)
writer.Println(0, writer.Colorize(grayColor, "%s", testSummary.Benchmark.Description))
if testSummary.Test {
testResult[testSummary.Benchmark] = "Pass"
passed := "Passed " + tick
writer.Println(0, writer.Colorize(greenColor, passed))
} else {
testResult[testSummary.Benchmark] = "Fail"
failed := "Failed " + cross
writer.Println(0, writer.Colorize(redColor, failed))
writer.Print(0, writer.Colorize(lilac, "Remediation: "))
@@ -49,7 +45,6 @@ func (r *DefaultReporter) TestWillRun(testSummary *TestSummary) {
writer.PrintBanner(writer.Colorize(grayColor, "Completed in %v", testSummary.RunTime), "-")
return
}
testResult[testSummary.Benchmark] = "Error"
preRunfmt := writer.Colorize(magentaColor, "[PreRun-Validation Error]")
errormsg := writer.Colorize(redColor, testSummary.ValidationError.Error())
bannerText := fmt.Sprintf("%s [%s] %s: %s %s", preRunfmt, testSummary.Benchmark.ID, testSummary.Benchmark.Title, errormsg, cross)
@@ -66,31 +61,31 @@ func (r *DefaultReporter) SuiteDidEnd(suiteSummary *SuiteSummary) {
writer.PrintNewLine()
writer.PrintBanner(writer.Colorize(grayColor, "Completed in %v", suiteSummary.RunTime), "=")

printScoreCard(testResult)
printScoreCard(benchmarksuite.SortedBenchmarks)
}

// FullSummary prints end result of all the tests at one place.
func printScoreCard(testResult map[*benchmark.Benchmark]v1alpha1.PolicyStatus) {
func printScoreCard(benchmarks []*benchmark.Benchmark) {
data := [][]string{}
counter := 0

for val, key := range testResult{
for _, b := range benchmarks {
counter++
var status string
switch key {

switch b.Status {
case "Error":
status = writer.Colorize(magentaColor, "Error")
case "Pass":
status = writer.Colorize(greenColor, "Passed")
case "Fail":
status = writer.Colorize(redColor, "Failed")
case "Skip":
default:
status = writer.Colorize(yellowColor, "Skipped")
}

testName := val.Title
result := []string{strconv.Itoa(counter), val.ID, testName, status}
testName := b.Title
result := []string{strconv.Itoa(counter), b.ID, testName, status}
data = append(data, result)
}

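The rendering of the collected rows is collapsed in the diff above. A rough sketch of how the scorecard table might be emitted with olekukonko/tablewriter follows; the header labels and styling are assumptions, not the file's actual code.

```go
package main

import (
	"os"
	"strconv"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// Rows shaped like the `data` slice built in printScoreCard:
	// counter, benchmark ID, title, status string.
	data := [][]string{
		{strconv.Itoa(1), "example-id-1", "Example benchmark", "Passed"},
		{strconv.Itoa(2), "example-id-2", "Another benchmark", "Failed"},
	}

	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"No.", "ID", "Test", "Result"}) // header labels are a guess
	table.AppendBulk(data)                                   // add all rows at once
	table.Render()                                           // print the scorecard to stdout
}
```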
5 changes: 3 additions & 2 deletions benchmarks/kubectl-mtb/pkg/benchmark/benchmark.go
@@ -16,8 +16,9 @@ type Benchmark struct {
Description string `yaml:"description"`
Remediation string `yaml:"remediation"`
ProfileLevel int `yaml:"profileLevel"`
PreRun func(types.RunOptions) error
Run func(types.RunOptions) error
Status string `yaml:"status"`
PreRun func(string, *kubernetes.Clientset, *kubernetes.Clientset) error
Contributor:
modify arguments to latest ones

Run func(string, *kubernetes.Clientset, *kubernetes.Clientset) error
PostRun func(types.RunOptions) error
}

@@ -15,6 +15,9 @@ type BenchmarkSuite struct {
Benchmarks []*benchmark.Benchmark
}

// SortedBenchmarks contains benchmarks sorted according to profile level, category and id
var SortedBenchmarks []*benchmark.Benchmark

// Totals returns count of benchmarks in Benchmark Suite
func (bs *BenchmarkSuite) Totals() int {
return len(bs.Benchmarks)
@@ -33,8 +36,8 @@ func (bs *BenchmarkSuite) ProfileLevel(pl int) []*benchmark.Benchmark {
benchmarksArray = append(benchmarksArray, b)
}
}
sortedBenchmarks := sortBenchmarks(benchmarksArray)
return sortedBenchmarks
SortedBenchmarks = sortBenchmarks(benchmarksArray)
return SortedBenchmarks
}

// sortBenchmarks returns slice of Benchmarks sorted according to Profile level, category and id respectively
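The body of sortBenchmarks is collapsed in the diff. A plausible sketch of the multi-key sort its doc comment describes is shown below: order by profile level, then category, then ID. The Category field and the exact comparison are assumptions based on how the reporter uses the struct, not the repository's actual implementation.

```go
package main

import (
	"fmt"
	"sort"
)

// Simplified stand-in for benchmark.Benchmark with just the sort keys.
type Benchmark struct {
	ID           string
	Category     string
	ProfileLevel int
}

// sortBenchmarks orders benchmarks by profile level, then category, then ID,
// matching the ordering the doc comment describes.
func sortBenchmarks(benchmarks []*Benchmark) []*Benchmark {
	sort.SliceStable(benchmarks, func(i, j int) bool {
		a, b := benchmarks[i], benchmarks[j]
		if a.ProfileLevel != b.ProfileLevel {
			return a.ProfileLevel < b.ProfileLevel
		}
		if a.Category != b.Category {
			return a.Category < b.Category
		}
		return a.ID < b.ID
	})
	return benchmarks
}

func main() {
	bs := []*Benchmark{
		{ID: "b-2", Category: "Isolation", ProfileLevel: 2},
		{ID: "b-1", Category: "Control Plane", ProfileLevel: 1},
	}
	for _, b := range sortBenchmarks(bs) {
		fmt.Println(b.ProfileLevel, b.Category, b.ID)
	}
}
```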