internal/pkg/scorecard: implement plugin system #1379

Merged: 15 commits, merged May 15, 2019
Changes from 1 commit
2 changes: 1 addition & 1 deletion cmd/operator-sdk/scorecard/cmd.go
@@ -44,14 +44,14 @@ func NewCmd() *cobra.Command {
scorecardCmd.Flags().String(scorecard.CSVPathOpt, "", "Path to CSV being tested")
scorecardCmd.Flags().Bool(scorecard.BasicTestsOpt, true, "Enable basic operator checks")
scorecardCmd.Flags().Bool(scorecard.OLMTestsOpt, true, "Enable OLM integration checks")
scorecardCmd.Flags().Bool(scorecard.TenantTestsOpt, false, "Enable good tenant checks")
scorecardCmd.Flags().String(scorecard.NamespacedManifestOpt, "", "Path to manifest for namespaced resources (e.g. RBAC and Operator manifest)")
scorecardCmd.Flags().String(scorecard.GlobalManifestOpt, "", "Path to manifest for Global resources (e.g. CRD manifests)")
scorecardCmd.Flags().StringSlice(scorecard.CRManifestOpt, nil, "Path to manifest for Custom Resource (required) (specify flag multiple times for multiple CRs)")
scorecardCmd.Flags().String(scorecard.ProxyImageOpt, fmt.Sprintf("quay.io/operator-framework/scorecard-proxy:%s", strings.TrimSuffix(version.Version, "+git")), "Image name for scorecard proxy")
scorecardCmd.Flags().String(scorecard.ProxyPullPolicyOpt, "Always", "Pull policy for scorecard proxy image")
scorecardCmd.Flags().String(scorecard.CRDsDirOpt, scaffold.CRDsDir, "Directory containing CRDs (all CRD manifest filenames must have the suffix 'crd.yaml')")
scorecardCmd.Flags().StringP(scorecard.OutputFormatOpt, "o", "human-readable", "Output format for results. Valid values: human-readable, json")
scorecardCmd.Flags().String(scorecard.PluginDirOpt, "scorecard", "Scorecard plugin directory (plugin executables must be in a \"bin\" subdirectory)")

if err := viper.BindPFlags(scorecardCmd.Flags()); err != nil {
log.Fatalf("Failed to bind scorecard flags to viper: %v", err)
4 changes: 3 additions & 1 deletion hack/tests/scorecard-subcommand.sh
@@ -20,7 +20,7 @@ commandoutput="$(operator-sdk scorecard \
--proxy-image "$DEST_IMAGE" \
--proxy-pull-policy Never \
2>&1)"
echo $commandoutput | grep "Total Score: 82%"
echo $commandoutput | grep "Total Score: 79%"

# test config file
commandoutput2="$(operator-sdk scorecard \
@@ -30,4 +30,6 @@ commandoutput2="$(operator-sdk scorecard \
echo $commandoutput2 | grep '^.*"error": 0,[[:space:]]"pass": 3,[[:space:]]"partialPass": 0,[[:space:]]"fail": 0,[[:space:]]"totalTests": 3,[[:space:]]"totalScorePercent": 100,.*$'
# check olm suite
echo $commandoutput2 | grep '^.*"error": 0,[[:space:]]"pass": 2,[[:space:]]"partialPass": 2,[[:space:]]"fail": 1,[[:space:]]"totalTests": 5,[[:space:]]"totalScorePercent": 65,.*$'
# check custom json result
echo $commandoutput2 | grep '^.*"error": 0,[[:space:]]"pass": 1,[[:space:]]"partialPass": 1,[[:space:]]"fail": 0,[[:space:]]"totalTests": 2,[[:space:]]"totalScorePercent": 71,.*$'
popd
54 changes: 40 additions & 14 deletions internal/pkg/scorecard/helpers.go
@@ -83,19 +83,7 @@ func ResultsCumulative(results []TestResult) (TestResult, error) {
func CalculateResult(tests []scapiv1alpha1.ScorecardTestResult) scapiv1alpha1.ScorecardSuiteResult {
scorecardSuiteResult := scapiv1alpha1.ScorecardSuiteResult{}
scorecardSuiteResult.Tests = tests
for _, test := range scorecardSuiteResult.Tests {
scorecardSuiteResult.TotalTests++
switch test.State {
case scapiv1alpha1.ErrorState:
scorecardSuiteResult.Error++
case scapiv1alpha1.PassState:
scorecardSuiteResult.Pass++
case scapiv1alpha1.PartialPassState:
scorecardSuiteResult.PartialPass++
case scapiv1alpha1.FailState:
scorecardSuiteResult.Fail++
}
}
scorecardSuiteResult = UpdateSuiteStates(scorecardSuiteResult)
return scorecardSuiteResult
}

@@ -147,7 +135,7 @@ func TestResultToScorecardTestResult(tr TestResult) scapiv1alpha1.ScorecardTestR
}

// UpdateState updates the state of a TestResult.
func UpdateState(res TestResult) TestResult {
func UpdateState(res scapiv1alpha1.ScorecardTestResult) scapiv1alpha1.ScorecardTestResult {
Member commented:
We have a handful of exported helper functions for the scapiv1alpha1 types in this package. I'd suggest moving them into the scapiv1alpha1 package, possibly making them methods on the scapiv1alpha1 structs (where that makes sense), and also possibly unexporting them if we think they'll only be used on the SDK side of the plugin interface.

This could be in a follow-on PR. Since we're going to reorganize our built-in scorecard tests as plugins, I'd be interested in your thoughts on a design for a plugin library that would make it easy to write a plugin in go.
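
A rough sketch of the suggested follow-on refactor (hypothetical; the method name and whether to export it are assumptions, not part of this PR):

// Hypothetical: move the suite bookkeeping onto the scapiv1alpha1 type itself
// instead of keeping it as an exported helper in internal/pkg/scorecard.
func (s *ScorecardSuiteResult) recountStates() {
	s.TotalTests = len(s.Tests)
	s.Error, s.Fail, s.PartialPass, s.Pass = 0, 0, 0, 0
	for _, t := range s.Tests {
		switch t.State {
		case ErrorState:
			s.Error++
		case PassState:
			s.Pass++
		case PartialPassState:
			s.PartialPass++
		case FailState:
			s.Fail++
		}
	}
}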

Contributor Author replied:
Yeah, I'll make a PR to expose some of these. I was initially intending to expose some of this with the JSON PR, but it wasn't quite ready. Now that we have the plugin support in place, we can probably expose the helpers.

if res.State == scapiv1alpha1.ErrorState {
return res
}
@@ -161,3 +149,41 @@ func UpdateState(res TestResult) TestResult {
return res
// TODO: decide what to do if a Test incorrectly sets points (Earned > Max)
}

// UpdateSuiteStates updates the state of each test in a suite and updates the suite's state counts to match
func UpdateSuiteStates(suite scapiv1alpha1.ScorecardSuiteResult) scapiv1alpha1.ScorecardSuiteResult {
suite.TotalTests = len(suite.Tests)
// reset all state values
suite.Error = 0
suite.Fail = 0
suite.PartialPass = 0
suite.Pass = 0
for idx, test := range suite.Tests {
suite.Tests[idx] = UpdateState(test)
switch test.State {
case scapiv1alpha1.ErrorState:
suite.Error++
case scapiv1alpha1.PassState:
suite.Pass++
case scapiv1alpha1.PartialPassState:
suite.PartialPass++
case scapiv1alpha1.FailState:
suite.Fail++
}
Contributor commented:
We should probably have a default case to catch and log any unknown state.
Even though it doesn't seem like we'll end up in an unknown state, it's still good practice in case we change it in the future.

Contributor Author replied:
I'll do this in a future PR. The issue is that there is no way to really handle this cleanly right now. One way to fix this would be to add another field to the SuiteResults of invalid. That way we can indicate to users that a plugin returned an invalid state. Another option (which is what I want to do in a future PR) is to change the state from whatever incorrect state it was in to error and add a string to the errors array indicating that the scorecard marked this test as failed due to an invalid state.

Technically though, due to the way the helpers are set up (we calculate the state using the earned vs max points unless the state is error), the only situation where this could occur (currently) would be if the earned points are higher than the max. I'd be fine marking that as an error.
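
A minimal sketch of that error-on-invalid-state idea (future work, not part of this PR; the error message wording is an assumption):

// Hypothetical default case for the switch above: treat any unrecognized
// state as an error and record why on the test result itself.
default:
	suite.Tests[idx].State = scapiv1alpha1.ErrorState
	suite.Tests[idx].Errors = append(suite.Tests[idx].Errors,
		fmt.Sprintf("test %q returned invalid state %q", test.Name, test.State))
	suite.Error++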

}
return suite
}

func CombineScorecardOutput(outputs []scapiv1alpha1.ScorecardOutput, log string) scapiv1alpha1.ScorecardOutput {
output := scapiv1alpha1.ScorecardOutput{
TypeMeta: metav1.TypeMeta{
Kind: "ScorecardOutput",
APIVersion: "osdk.scorecard.com/v1alpha1",
},
Log: log,
}
for _, item := range outputs {
output.Results = append(output.Results, item.Results...)
}
return output
}
121 changes: 94 additions & 27 deletions internal/pkg/scorecard/scorecard.go
@@ -23,11 +23,13 @@ import (
"io"
"io/ioutil"
"os"
"os/exec"

"github.com/operator-framework/operator-sdk/internal/pkg/scaffold"
k8sInternal "github.com/operator-framework/operator-sdk/internal/util/k8sutil"
"github.com/operator-framework/operator-sdk/internal/util/projutil"
"github.com/operator-framework/operator-sdk/internal/util/yamlutil"
scapiv1alpha1 "github.com/operator-framework/operator-sdk/pkg/apis/scorecard/v1alpha1"

"github.com/ghodss/yaml"
olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
@@ -58,20 +60,19 @@ const (
CSVPathOpt = "csv-path"
BasicTestsOpt = "basic-tests"
OLMTestsOpt = "olm-tests"
TenantTestsOpt = "good-tenant-tests"
NamespacedManifestOpt = "namespaced-manifest"
GlobalManifestOpt = "global-manifest"
CRManifestOpt = "cr-manifest"
ProxyImageOpt = "proxy-image"
ProxyPullPolicyOpt = "proxy-pull-policy"
CRDsDirOpt = "crds-dir"
OutputFormatOpt = "output"
PluginDirOpt = "plugin-dir"
)

const (
basicOperator = "Basic Operator"
olmIntegration = "OLM Integration"
goodTenant = "Good Tenant"
)

var (
log = logrus.New()
)

func runTests() ([]TestSuite, error) {
func runTests() ([]scapiv1alpha1.ScorecardOutput, error) {
defer func() {
if err := cleanupScorecard(); err != nil {
log.Errorf("Failed to cleanup resources: (%v)", err)
@@ -254,8 +255,11 @@ func runTests() ([]TestSuite, error) {
dupMap[gvk] = true
}

var pluginResults []scapiv1alpha1.ScorecardOutput
var suites []TestSuite
for _, cr := range crs {
// TODO: Change built-in tests into plugins
// Run built-in tests.
fmt.Printf("Running for cr: %s\n", cr)
if !viper.GetBool(OlmDeployedOpt) {
if err := createFromYAMLFile(viper.GetString(GlobalManifestOpt)); err != nil {
@@ -275,8 +279,6 @@ func runTests() ([]TestSuite, error) {
if err := waitUntilCRStatusExists(obj); err != nil {
return nil, fmt.Errorf("failed waiting to check if CR status exists: %v", err)
}

// Run tests.
if viper.GetBool(BasicTestsOpt) {
conf := BasicTestConfig{
Client: runtimeClient,
@@ -310,7 +312,63 @@ func runTests() ([]TestSuite, error) {
if err != nil {
return nil, fmt.Errorf("failed to merge test suite results: %v", err)
}
return suites, nil
for _, suite := range suites {
// convert to ScorecardOutput format
// will add log when basic and olm tests are separated into plugins
pluginResults = append(pluginResults, TestSuitesToScorecardOutput([]TestSuite{suite}, ""))
}
// Cleanup built-in test resources before running plugins
if err := cleanupScorecard(); err != nil {
log.Errorf("Failed to cleanup resources: (%v)", err)
}
// Run plugins
pluginDir := viper.GetString(PluginDirOpt)
if dir, err := os.Stat(pluginDir); err != nil || !dir.IsDir() {
log.Warnf("Plugin directory not found; skipping plugin tests")
} else {
if err := os.Chdir(pluginDir); err != nil {
return nil, fmt.Errorf("failed to chdir into scorecard plugin directory")
}
// executable files must be in "bin" subdirectory
files, err := ioutil.ReadDir("bin")
if err != nil {
return nil, fmt.Errorf("failed to list files in %s/bin", pluginDir)
}
for _, file := range files {
cmd := exec.Command("bash", "-c", "./bin/"+file.Name())
output, err := cmd.CombinedOutput()
Member commented:
In this case, it might make sense to capture stdout and stderr separately. We'd parse the stdout as JSON and maybe include the stderr in a log?

Contributor Author replied:
Where exactly would we put the log? We provide a per-suite log field for plugins to use so they shouldn't be outputting anything to stdout or stderr except the JSON or potentially fatal errors. I assume we could add it into the main scorecard log (in the final ScorecardOutput), but that could get messy pretty quickly

Member replied:
Yeah that's a good question. Wdyt of checking if result.Log is empty. If so, set result.Log to the stderr buffer. If not, log a warning that would be seen in the human-readable output?

Contributor Author replied:
That sounds reasonable. I'll do that.

Contributor Author added:
Actually, there's a problem with that. A single plugin can have more than 1 suite. If that's the case, we won't be able to simply add the stderr to a log field.

Contributor Author added:
The way I'll handle this for now is using log.Warn for the stderr output, which will print it nicely in the human-readable output or add it to the main log for the json output.
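
A sketch of that separate stdout/stderr capture (illustrative only; the code as reviewed here still uses CombinedOutput, and the bytes import would need to be added):

// Hypothetical: parse stdout as the ScorecardOutput JSON and surface any
// stderr output as a warning in the main scorecard log.
var stdout, stderr bytes.Buffer
cmd := exec.Command("./bin/" + file.Name())
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if stderr.Len() != 0 {
	log.Warnf("Plugin `%s` wrote to stderr: %s", file.Name(), stderr.String())
}
// Error handling and json.Unmarshal(stdout.Bytes(), &result) would then follow
// the same pattern as the CombinedOutput-based code below.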

if err != nil {
failedPlugin := scapiv1alpha1.ScorecardOutput{}
failedResult := scapiv1alpha1.ScorecardSuiteResult{}
failedResult.Name = fmt.Sprintf("Failed Plugin: %s", file.Name())
failedResult.Description = fmt.Sprintf("Plugin with file name %s", file.Name())
failedResult.Error = 1
failedResult.Log = string(output)
failedPlugin.Results = append(failedPlugin.Results, failedResult)
pluginResults = append(pluginResults, failedPlugin)
// output error to main logger as well for human-readable output
log.Errorf("Plugin `%s` failed with error (%v)", file.Name(), err)
continue
}
//parse output and add to suites
result := scapiv1alpha1.ScorecardOutput{}
err = json.Unmarshal(output, &result)
if err != nil {
failedPlugin := scapiv1alpha1.ScorecardOutput{}
failedResult := scapiv1alpha1.ScorecardSuiteResult{}
failedResult.Name = fmt.Sprintf("Failed Plugin: %s", file.Name())
failedResult.Description = fmt.Sprintf("Plugin with file name `%s` failed", file.Name())
failedResult.Error = 1
failedResult.Log = string(output)
failedPlugin.Results = append(failedPlugin.Results, failedResult)
pluginResults = append(pluginResults, failedPlugin)
log.Errorf("Output from plugin `%s` failed to unmarshal with error (%v)", file.Name(), err)
continue
}
pluginResults = append(pluginResults, result)
}
}
return pluginResults, nil
}

func ScorecardTests(cmd *cobra.Command, args []string) error {
@@ -321,42 +379,51 @@ func ScorecardTests(cmd *cobra.Command, args []string) error {
return err
}
cmd.SilenceUsage = true
suites, err := runTests()
pluginOutputs, err := runTests()
if err != nil {
return err
}
totalScore := 0.0
// Update the state for the tests
for _, suite := range suites {
for idx, res := range suite.TestResults {
suite.TestResults[idx] = UpdateState(res)
for _, suite := range pluginOutputs {
Contributor commented:
A for loop nested inside another may not be the best solution; note the Big O here. Could this cause performance issues, and could it be improved?

for idx, res := range suite.Results {
suite.Results[idx] = UpdateSuiteStates(res)
}
}
if viper.GetString(OutputFormatOpt) == "human-readable" {
for _, suite := range suites {
fmt.Printf("%s:\n", suite.GetName())
for _, result := range suite.TestResults {
fmt.Printf("\t%s: %d/%d\n", result.Test.GetName(), result.EarnedPoints, result.MaximumPoints)
numSuites := 0
for _, plugin := range pluginOutputs {
for _, suite := range plugin.Results {
fmt.Printf("%s:\n", suite.Name)
Contributor commented:
Same concern here about the complexity of this implementation.

for _, result := range suite.Tests {
fmt.Printf("\t%s: %d/%d\n", result.Name, result.EarnedPoints, result.MaximumPoints)
}
totalScore += float64(suite.TotalScore)
numSuites++
}
totalScore += float64(suite.TotalScore())
}
totalScore = totalScore / float64(len(suites))
totalScore = totalScore / float64(numSuites)
fmt.Printf("\nTotal Score: %.0f%%\n", totalScore)
// TODO: We can probably use some helper functions to clean up these quadruple nested loops
// Print suggestions
for _, suite := range suites {
for _, result := range suite.TestResults {
for _, suggestion := range result.Suggestions {
// 33 is yellow (specifically, the same shade of yellow that logrus uses for warnings)
fmt.Printf("\x1b[%dmSUGGESTION:\x1b[0m %s\n", 33, suggestion)
for _, plugin := range pluginOutputs {
for _, suite := range plugin.Results {
for _, result := range suite.Tests {
for _, suggestion := range result.Suggestions {
// 33 is yellow (specifically, the same shade of yellow that logrus uses for warnings)
Contributor commented:
A loop nested inside another, four levels deep. Is it possible to solve this in a more performant way?

fmt.Printf("\x1b[%dmSUGGESTION:\x1b[0m %s\n", 33, suggestion)
}
}
}
}
// Print errors
for _, suite := range suites {
for _, result := range suite.TestResults {
for _, err := range result.Errors {
// 31 is red (specifically, the same shade of red that logrus uses for errors)
fmt.Printf("\x1b[%dmERROR:\x1b[0m %s\n", 31, err)
for _, plugin := range pluginOutputs {
Contributor commented:
A loop nested inside another, four levels deep. Is it possible to solve this in a more performant way?

for _, suite := range plugin.Results {
for _, result := range suite.Tests {
for _, err := range result.Errors {
// 31 is red (specifically, the same shade of red that logrus uses for errors)
fmt.Printf("\x1b[%dmERROR:\x1b[0m %s\n", 31, err)
}
}
}
}
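
One way to address the TODO above about the quadruple-nested loops (not part of this PR; names are hypothetical) would be a small iterator helper:

// Hypothetical helper: visit every test result across all plugin outputs once,
// so each printing concern above becomes a single callback.
func forEachTestResult(outputs []scapiv1alpha1.ScorecardOutput, fn func(scapiv1alpha1.ScorecardTestResult)) {
	for _, plugin := range outputs {
		for _, suite := range plugin.Results {
			for _, result := range suite.Tests {
				fn(result)
			}
		}
	}
}

// Usage sketch for the suggestion printing:
// forEachTestResult(pluginOutputs, func(r scapiv1alpha1.ScorecardTestResult) {
// 	for _, s := range r.Suggestions {
// 		fmt.Printf("\x1b[33mSUGGESTION:\x1b[0m %s\n", s)
// 	}
// })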
@@ -366,7 +433,7 @@ func ScorecardTests(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("failed to read log buffer: %v", err)
}
scTest := TestSuitesToScorecardOutput(suites, string(log))
scTest := CombineScorecardOutput(pluginOutputs, string(log))
// Pretty print so users can also read the json output
bytes, err := json.MarshalIndent(scTest, "", " ")
if err != nil {
7 changes: 5 additions & 2 deletions internal/pkg/scorecard/test_definitions.go
@@ -89,8 +89,11 @@ func (ts *TestSuite) TotalScore() (score int) {
for _, weight := range ts.Weights {
addedWeights += weight
}
floatScore = floatScore * (100 / addedWeights)
return int(floatScore)
// protect against divide by zero for failed plugins
if addedWeights == 0 {
return 0
}
return int(floatScore * (100 / addedWeights))
}

// Run runs all Tests in a TestSuite
42 changes: 42 additions & 0 deletions test/test-framework/scorecard/assets/output.json
@@ -0,0 +1,42 @@
{
"kind": "ScorecardOutput",
"apiVersion": "scorecard/v1alpha1",
"metadata": {
"creationTimestamp": null
},
"log": "",
"results": [
{
"name": "Custom Scorecard",
"description": "Custom operator scorecard tests",
"error": 0,
"pass": 1,
"partialPass": 1,
"fail": 0,
"totalTests": 2,
"totalScorePercent": 71,
"tests": [
{
"state": "partial_pass",
"name": "Operator Actions Reflected In Status",
"description": "The operator updates the Custom Resources status when the application state is updated",
"earnedPoints": 2,
"maximumPoints": 3,
"suggestions": [
"Operator should update status when scaling cluster down"
],
"errors": []
},
{
"state": "pass",
"name": "Verify health of cluster",
"description": "The cluster created by the operator is working properly",
"earnedPoints": 1,
"maximumPoints": 1,
"suggestions": [],
"errors": []
}
]
}
]
}
2 changes: 2 additions & 0 deletions test/test-framework/scorecard/bin/print-json.sh
@@ -0,0 +1,2 @@
#!/bin/bash
cat ./assets/output.json
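
For context, the test plugin above simply cats a pre-built JSON asset. Any executable placed in the plugin directory's bin/ subdirectory that prints ScorecardOutput JSON to stdout should work; a minimal hypothetical Go plugin, with field names mirroring the output.json asset above, might look like:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Local stand-ins for the scorecard output schema, keyed to the JSON fields
// shown in test/test-framework/scorecard/assets/output.json above.
type testResult struct {
	State         string   `json:"state"`
	Name          string   `json:"name"`
	EarnedPoints  int      `json:"earnedPoints"`
	MaximumPoints int      `json:"maximumPoints"`
	Suggestions   []string `json:"suggestions"`
	Errors        []string `json:"errors"`
}

type suiteResult struct {
	Name              string       `json:"name"`
	Description       string       `json:"description"`
	Pass              int          `json:"pass"`
	TotalTests        int          `json:"totalTests"`
	TotalScorePercent int          `json:"totalScorePercent"`
	Tests             []testResult `json:"tests"`
}

type scorecardOutput struct {
	Kind       string        `json:"kind"`
	APIVersion string        `json:"apiVersion"`
	Log        string        `json:"log"`
	Results    []suiteResult `json:"results"`
}

func main() {
	out := scorecardOutput{
		Kind:       "ScorecardOutput",
		APIVersion: "scorecard/v1alpha1",
		Results: []suiteResult{{
			Name:              "Example Plugin Suite",
			Description:       "Illustrative custom checks",
			Pass:              1,
			TotalTests:        1,
			TotalScorePercent: 100,
			Tests: []testResult{{
				State:         "pass",
				Name:          "Example check",
				EarnedPoints:  1,
				MaximumPoints: 1,
				Suggestions:   []string{},
				Errors:        []string{},
			}},
		}},
	}
	b, err := json.Marshal(out)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(string(b))
}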