Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
411 changes: 171 additions & 240 deletions pkg/api/componentreadiness/component_report.go

Large diffs are not rendered by default.

48 changes: 32 additions & 16 deletions pkg/api/componentreadiness/component_report_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -491,7 +491,8 @@ func TestGenerateComponentReport(t *testing.T) {
},
},
ReportTestStats: crtype.ReportTestStats{
Comparison: crtype.FisherExact,
RequiredConfidence: 95,
Comparison: crtype.FisherExact,
Explanations: []string{
"Extreme regression detected.",
"Fishers Exact probability of a regression: 100.00%.",
Expand Down Expand Up @@ -532,7 +533,8 @@ func TestGenerateComponentReport(t *testing.T) {
},
},
ReportTestStats: crtype.ReportTestStats{
Comparison: crtype.FisherExact,
RequiredConfidence: 95,
Comparison: crtype.FisherExact,
Explanations: []string{
"Significant regression detected.",
"Fishers Exact probability of a regression: 100.00%.",
Expand Down Expand Up @@ -828,7 +830,8 @@ func TestGenerateComponentReport(t *testing.T) {
},
},
ReportTestStats: crtype.ReportTestStats{
Comparison: crtype.FisherExact,
RequiredConfidence: 90,
Comparison: crtype.FisherExact,
Explanations: []string{
"Significant regression detected.",
"Fishers Exact probability of a regression: 99.92%.",
Expand Down Expand Up @@ -1056,7 +1059,8 @@ func TestGenerateComponentReport(t *testing.T) {
},
},
ReportTestStats: crtype.ReportTestStats{
Comparison: crtype.FisherExact,
RequiredConfidence: 95,
Comparison: crtype.FisherExact,
Explanations: []string{
"Extreme regression detected.",
"Fishers Exact probability of a regression: 100.00%.",
Expand Down Expand Up @@ -1097,7 +1101,8 @@ func TestGenerateComponentReport(t *testing.T) {
},
},
ReportTestStats: crtype.ReportTestStats{
Comparison: crtype.FisherExact,
RequiredConfidence: 95,
Comparison: crtype.FisherExact,
Explanations: []string{
"Significant regression detected.",
"Fishers Exact probability of a regression: 100.00%.",
Expand Down Expand Up @@ -1171,7 +1176,7 @@ func TestGenerateComponentReport(t *testing.T) {
for ic := range report.Rows[ir].Columns {
assert.Equal(t, len(tc.expectedReport.Rows[ir].Columns[ic].RegressedTests), len(report.Rows[ir].Columns[ic].RegressedTests))
for it, regTest := range report.Rows[ir].Columns[ic].RegressedTests {
assert.InDelta(t, *tc.expectedReport.Rows[ir].Columns[ic].RegressedTests[it].FisherExact, *regTest.FisherExact, 0.000001)
assert.InDelta(t, *tc.expectedReport.Rows[ir].Columns[ic].RegressedTests[it].FisherExact, *regTest.FisherExact, 0.000001, regTest.TestName)
tc.expectedReport.Rows[ir].Columns[ic].RegressedTests[it].FisherExact = nil
report.Rows[ir].Columns[ic].RegressedTests[it].FisherExact = nil

Expand Down Expand Up @@ -1242,8 +1247,6 @@ func TestGenerateComponentTestDetailsReport(t *testing.T) {
FailureCount: 200,
FlakeCount: 100,
},
Start: &time.Time{},
End: &time.Time{},
}
sampleTestStatsHigh := crtype.TestDetailsTestStats{
SuccessRate: 0.9203539823008849,
Expand Down Expand Up @@ -1288,8 +1291,6 @@ func TestGenerateComponentTestDetailsReport(t *testing.T) {
FailureCount: 100,
FlakeCount: 50,
},
Start: &time.Time{},
End: &time.Time{},
}
sampleReleaseStatsOneLow := crtype.TestDetailsReleaseStats{
Release: testDetailsGenerator.ReqOptions.SampleRelease.Release,
Expand All @@ -1310,8 +1311,6 @@ func TestGenerateComponentTestDetailsReport(t *testing.T) {
FailureCount: 600,
FlakeCount: 50,
},
Start: &time.Time{},
End: &time.Time{},
}
tests := []struct {
name string
Expand Down Expand Up @@ -1887,16 +1886,33 @@ func Test_componentReportGenerator_assessComponentStatus(t *testing.T) {
c.ReqOptions.AdvancedOption.PassRateRequiredAllTests = tt.requiredPassRateForAllTests
c.ReqOptions.AdvancedOption.MinimumFailure = tt.minFail

testStats := c.assessComponentStatus(0, tt.sampleTotal, tt.sampleSuccess, tt.sampleFlake, tt.baseTotal, tt.baseSuccess, tt.baseFlake, nil, false, tt.numberOfIgnoredSamples, "dummyRelease", nil, nil)
assert.Equalf(t, tt.expectedStatus, testStats.ReportStatus, "assessComponentStatus expected status not equal")
testAnalysis := &crtype.ReportTestStats{
SampleStats: crtype.TestDetailsReleaseStats{
TestDetailsTestStats: crtype.TestDetailsTestStats{
SuccessCount: tt.sampleSuccess,
FlakeCount: tt.sampleFlake,
FailureCount: tt.sampleTotal - tt.sampleSuccess - tt.sampleFlake,
},
},
BaseStats: &crtype.TestDetailsReleaseStats{
TestDetailsTestStats: crtype.TestDetailsTestStats{
SuccessCount: tt.baseSuccess,
FlakeCount: tt.baseFlake,
FailureCount: tt.baseTotal - tt.baseSuccess - tt.baseFlake,
},
},
}

c.assessComponentStatus(testAnalysis, nil, false, tt.numberOfIgnoredSamples)
assert.Equalf(t, tt.expectedStatus, testAnalysis.ReportStatus, "assessComponentStatus expected status not equal")
if tt.expectedFischers != nil {
// Mac and Linux do not match up on floating point precision, so let's approximate the comparison:
assert.Equalf(t,
fmt.Sprintf("%.4f", *tt.expectedFischers),
fmt.Sprintf("%.4f", *testStats.FisherExact),
fmt.Sprintf("%.4f", *testAnalysis.FisherExact),
"assessComponentStatus expected fischers value not equal")
} else {
assert.Nil(t, testStats.FisherExact)
assert.Nil(t, testAnalysis.FisherExact)
}

})
Expand Down
18 changes: 8 additions & 10 deletions pkg/api/componentreadiness/middleware/interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,15 +20,13 @@ type Middleware interface {
// QueryTestDetails phase allow middleware to load data that will later be used.
QueryTestDetails(ctx context.Context, wg *sync.WaitGroup, errCh chan error, allJobVariants crtype.JobVariants)

// Transform gives middleware opportunity to adjust the queried base and sample TestStatuses before we
// proceed to analysis.
Transform(status *crtype.ReportTestStatus) error
// PreAnalysis gives middleware opportunity to adjust test analysis data prior to running analysis.
// Implementations can alter base/sample data as needed, request confidence levels, and add explanations for
// what they did.
// NOTE: due to differences in test details reports, this function is not used there.
PreAnalysis(testKey crtype.ReportTestIdentification, testStats *crtype.ReportTestStats) error

// TransformTestDetails gives middleware opportunity to adjust the queried base and sample job run data
// before we proceed to analysis.
TransformTestDetails(status *crtype.JobRunTestReportStatus) error

// TestDetailsAnalyze gives middleware opportunity to analyze data and adjust the final report before
// being returned over the API.
TestDetailsAnalyze(report *crtype.ReportTestDetails) error
// PreTestDetailsAnalysis gives middleware the opportunity to adjust inputs to the report status
// prior to analysis.
PreTestDetailsAnalysis(status *crtype.JobRunTestReportStatus) error
}
Original file line number Diff line number Diff line change
Expand Up @@ -40,20 +40,11 @@ func (r *RegressionAllowances) Query(_ context.Context, _ *sync.WaitGroup, _ crt
// unused
}

// Transform iterates the base status looking for any with an accepted regression in the basis release, and if found
// PreAnalysis iterates the base status looking for any with an accepted regression in the basis release, and if found
// swaps out the stats with the better pass rate data specified in the intentional regression allowance.
func (r *RegressionAllowances) Transform(status *crtype.ReportTestStatus) error {
for testKeyStr, baseStats := range status.BaseStatus {
testKey, err := utils.DeserializeTestKey(baseStats, testKeyStr)
if err != nil {
return err
}
func (r *RegressionAllowances) PreAnalysis(testKey crtype.ReportTestIdentification, testStats *crtype.ReportTestStats) error {

newBaseStatus, newBaseRelease := r.matchBaseRegression(testKey, r.reqOptions.BaseRelease.Release, baseStats)
if newBaseRelease != r.reqOptions.BaseRelease.Release {
status.BaseStatus[testKeyStr] = newBaseStatus
}
}
r.matchBaseRegression(testKey, r.reqOptions.BaseRelease.Release, testStats)

return nil
}
Expand All @@ -62,50 +53,52 @@ func (r *RegressionAllowances) Transform(status *crtype.ReportTestStatus) error
// in an intentional regression that accepted a lower threshold but maintains the higher
// threshold when used as a basis.
// It will return the original testStatus if there is no intentional regression.
func (r *RegressionAllowances) matchBaseRegression(testID crtype.ReportTestIdentification, baseRelease string, baseStats crtype.TestStatus) (crtype.TestStatus, string) {
var baseRegression *regressionallowances.IntentionalRegression
func (r *RegressionAllowances) matchBaseRegression(testID crtype.ReportTestIdentification, baseRelease string, testStats *crtype.ReportTestStats) {
// Nothing to do for tests with no basis. (i.e. new tests)
if testStats.BaseStats == nil {
return
}

// TODO: knowledge of variant cross compare here would be nice to eliminate
var baseRegression *regressionallowances.IntentionalRegression
if len(r.reqOptions.VariantOption.VariantCrossCompare) == 0 {
// only really makes sense when not cross-comparing variants:
// look for corresponding regressions we can account for in the analysis
// only if we are ignoring fallback, otherwise we will let fallback determine the threshold
baseRegression = r.regressionGetterFunc(baseRelease, testID.ColumnIdentification, testID.TestID)

_, success, fail, flake := baseStats.GetTotalSuccessFailFlakeCounts()
basePassRate := utils.CalculatePassRate(r.reqOptions, success, fail, flake)
baseStats := testStats.BaseStats

success := baseStats.SuccessCount
fail := baseStats.FailureCount
flake := baseStats.FlakeCount
basePassRate := utils.CalculatePassRate(success, fail, flake, r.reqOptions.AdvancedOption.FlakeAsFailure)
if baseRegression != nil && baseRegression.PreviousPassPercentage(r.reqOptions.AdvancedOption.FlakeAsFailure) > basePassRate {
// override with the basis regression previous values
// testStats will reflect the expected threshold, not the computed values from the release with the allowed regression
baseRegressionPreviousRelease, err := utils.PreviousRelease(r.reqOptions.BaseRelease.Release)
if err != nil {
log.WithError(err).Error("Failed to determine the previous release for baseRegression")
} else {
// create a clone since we might be updating a cached item though the same regression would likely apply each time...
updatedStats := crtype.TestStatus{TestName: baseStats.TestName, TestSuite: baseStats.TestSuite, Capabilities: baseStats.Capabilities,
Component: baseStats.Component, Variants: baseStats.Variants,
FlakeCount: baseRegression.PreviousFlakes,
testStats.BaseStats.Release = baseRegressionPreviousRelease
testStats.BaseStats.TestDetailsTestStats = crtype.TestDetailsTestStats{
SuccessCount: baseRegression.PreviousSuccesses,
TotalCount: baseRegression.PreviousFailures + baseRegression.PreviousFlakes + baseRegression.PreviousSuccesses,
FailureCount: baseRegression.PreviousFailures,
FlakeCount: baseRegression.PreviousFlakes,
SuccessRate: utils.CalculatePassRate(baseRegression.PreviousSuccesses, baseRegression.PreviousFailures,
baseRegression.PreviousFlakes, r.reqOptions.AdvancedOption.FlakeAsFailure),
}
baseStats = updatedStats
baseRelease = baseRegressionPreviousRelease
log.Infof("BaseRegression - PreviousPassPercentage overrides baseStats. Release: %s, Successes: %d, Flakes: %d, Total: %d",
baseRelease, baseStats.SuccessCount, baseStats.FlakeCount, baseStats.TotalCount)

log.Infof("BaseRegression - PreviousPassPercentage overrides baseStats. Release: %s, Successes: %d, Flakes: %d",
baseRegressionPreviousRelease, baseStats.SuccessCount, baseStats.FlakeCount)
}
}
}

return baseStats, baseRelease
}

// QueryTestDetails is a no-op for this middleware: RegressionAllowances has no
// test-details data of its own to load during the query phase.
func (r *RegressionAllowances) QueryTestDetails(ctx context.Context, wg *sync.WaitGroup, errCh chan error, allJobVariants crtype.JobVariants) {
}

func (r *RegressionAllowances) TransformTestDetails(status *crtype.JobRunTestReportStatus) error {
return nil
}

func (r *RegressionAllowances) TestDetailsAnalyze(details *crtype.ReportTestDetails) error {
// PreTestDetailsAnalysis is a no-op for this middleware: it makes no
// adjustments to the job run report status prior to test-details analysis.
func (r *RegressionAllowances) PreTestDetailsAnalysis(status *crtype.JobRunTestReportStatus) error {
	return nil
}
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
package regressionallowances

import (
"encoding/json"
"reflect"
"testing"

"github.com/openshift/sippy/pkg/api/componentreadiness/utils"
crtype "github.com/openshift/sippy/pkg/apis/api/componentreport"
"github.com/openshift/sippy/pkg/regressionallowances"
"github.com/stretchr/testify/assert"
Expand All @@ -19,9 +19,9 @@ func Test_Transform(t *testing.T) {
regressionGetter := func(releaseString string, variant crtype.ColumnIdentification, testID string) *regressionallowances.IntentionalRegression {
if releaseString == "4.18" && reflect.DeepEqual(variant.Variants, variants) && testID == test1ID {
return &regressionallowances.IntentionalRegression{
JiraComponent: "foo",
JiraComponent: "",
TestID: test1ID,
TestName: "test1",
TestName: "test 1",
Variant: variant,
PreviousSuccesses: 100,
PreviousFailures: 0,
Expand All @@ -40,74 +40,80 @@ func Test_Transform(t *testing.T) {
},
AdvancedOption: crtype.RequestAdvancedOptions{IncludeMultiReleaseAnalysis: true},
}
test1Variants := []string{"Arch:amd64", "Platform:aws"}
test1Key := crtype.TestWithVariantsKey{
TestID: test1ID,
Variants: variants,

test1Key := crtype.ReportTestIdentification{
RowIdentification: crtype.RowIdentification{
Component: "",
Capability: "",
TestName: "test 1",
TestSuite: "",
TestID: test1ID,
},
ColumnIdentification: crtype.ColumnIdentification{
Variants: variants,
},
}
test1KeyBytes, err := json.Marshal(test1Key)
test1KeyStr := string(test1KeyBytes)
assert.NoError(t, err)

test2ID := "test2ID"
test2Key := crtype.TestWithVariantsKey{
TestID: test2ID,
Variants: variants,
test2Key := crtype.ReportTestIdentification{
RowIdentification: crtype.RowIdentification{
Component: "",
Capability: "",
TestName: "test 2",
TestSuite: "",
TestID: test2ID,
},
ColumnIdentification: crtype.ColumnIdentification{
Variants: variants,
},
}
test2KeyBytes, err := json.Marshal(test2Key)
test2KeyStr := string(test2KeyBytes)
assert.NoError(t, err)

tests := []struct {
name string
testKey crtype.ReportTestIdentification
reqOpts crtype.RequestOptions
baseStatus map[string]crtype.TestStatus
expectedStatus map[string]crtype.TestStatus
baseStatus *crtype.ReportTestStats
expectedStatus *crtype.ReportTestStats
}{
{
name: "swap base stats using regression allowance",
reqOpts: reqOpts419,
baseStatus: map[string]crtype.TestStatus{
test1KeyStr: buildTestStatus("test1", test1Variants, 100, 75, 0, nil),
},
expectedStatus: map[string]crtype.TestStatus{
test1KeyStr: buildTestStatus("test1", test1Variants, 100, 100, 0, nil),
},
name: "swap base stats using regression allowance",
reqOpts: reqOpts419,
testKey: test1Key,
baseStatus: buildTestStatus(100, 75, 0, "4.18"),
expectedStatus: buildTestStatus(100, 100, 0, "4.17"),
},
{
name: "do not swap base stats if no regression allowance",
reqOpts: reqOpts419,
baseStatus: map[string]crtype.TestStatus{
test2KeyStr: buildTestStatus("test2", test1Variants, 100, 75, 0, nil),
},
expectedStatus: map[string]crtype.TestStatus{
test2KeyStr: buildTestStatus("test2", test1Variants, 100, 75, 0, nil),
},
name: "do not swap base stats if no regression allowance",
reqOpts: reqOpts419,
testKey: test2Key,
baseStatus: buildTestStatus(100, 75, 0, "4.18"),
expectedStatus: buildTestStatus(100, 75, 0, "4.18"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
rfb := NewRegressionAllowancesMiddleware(test.reqOpts)
rfb.regressionGetterFunc = regressionGetter
status := &crtype.ReportTestStatus{BaseStatus: test.baseStatus}
err := rfb.Transform(status)
err := rfb.PreAnalysis(test.testKey, test.baseStatus)
assert.NoError(t, err)
assert.Equal(t, test.expectedStatus, status.BaseStatus)
assert.Equal(t, *test.expectedStatus, *test.baseStatus)
})
}
}

//nolint:unparam
func buildTestStatus(testName string, variants []string, total, success, flake int, release *crtype.Release) crtype.TestStatus {
return crtype.TestStatus{
TestName: testName,
TestSuite: "conformance",
Component: "foo",
Capabilities: nil,
Variants: variants,
TotalCount: total,
SuccessCount: success,
FlakeCount: flake,
Release: release,
func buildTestStatus(total, success, flake int, baseRelease string) *crtype.ReportTestStats {
fails := total - success - flake
ts := &crtype.ReportTestStats{
BaseStats: &crtype.TestDetailsReleaseStats{
Release: baseRelease,
TestDetailsTestStats: crtype.TestDetailsTestStats{
FailureCount: fails,
SuccessCount: success,
FlakeCount: flake,
SuccessRate: utils.CalculatePassRate(success, fails, flake, false),
},
},
}
return ts
}
Loading