refactor: Use builder for config audit scan jobs
Signed-off-by: Daniel Pacak <pacak.daniel@gmail.com>
danielpacak committed Apr 23, 2021
1 parent 3d7ad49 commit f7fffc0
Showing 10 changed files with 157 additions and 136 deletions.
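This commit replaces the hand-rolled batchv1.Job construction in pkg/configauditreport/scanner.go and pkg/operator/controller/configauditreport.go with a single fluent ScanJobBuilder defined in pkg/configauditreport/builder.go. A minimal usage sketch, lifted from the operator call site in this diff (the surrounding reconciler fields r.Plugin, r.PluginContext, r.Config, and workloadObj come from that context):

	job, secrets, err := configauditreport.NewScanJob().
		WithPlugin(r.Plugin).                 // plugin that renders the scan job's pod spec via GetScanJobSpec
		WithPluginContext(r.PluginContext).   // supplies the scanner name and the namespace the job runs in
		WithTimeout(r.Config.ScanJobTimeout). // translated into the Job's activeDeadlineSeconds
		WithObject(workloadObj).              // audited workload; also drives the job name and resource labels
		Get()                                 // assembles the labeled *batchv1.Job plus any secrets the plugin needs
	if err != nil {
		return ctrl.Result{}, err
	}
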
91 changes: 91 additions & 0 deletions pkg/configauditreport/builder.go
@@ -3,16 +3,107 @@ package configauditreport
import (
"fmt"
"strings"
"time"

"github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1"
"github.com/aquasecurity/starboard/pkg/kube"
"github.com/aquasecurity/starboard/pkg/starboard"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

type ScanJobBuilder struct {
plugin Plugin
pluginContext starboard.PluginContext
timeout time.Duration
object client.Object
}

func NewScanJob() *ScanJobBuilder {
return &ScanJobBuilder{}
}

func (s *ScanJobBuilder) WithPlugin(plugin Plugin) *ScanJobBuilder {
s.plugin = plugin
return s
}

func (s *ScanJobBuilder) WithPluginContext(pluginContext starboard.PluginContext) *ScanJobBuilder {
s.pluginContext = pluginContext
return s
}

func (s *ScanJobBuilder) WithTimeout(timeout time.Duration) *ScanJobBuilder {
s.timeout = timeout
return s
}

func (s *ScanJobBuilder) WithObject(object client.Object) *ScanJobBuilder {
s.object = object
return s
}

func (s *ScanJobBuilder) Get() (*batchv1.Job, []*corev1.Secret, error) {
jobSpec, secrets, err := s.plugin.GetScanJobSpec(s.pluginContext, s.object)
if err != nil {
return nil, nil, err
}

podSpec, err := kube.GetPodSpec(s.object)
if err != nil {
return nil, nil, err
}

podSpecHash := kube.ComputeHash(podSpec)

pluginConfigHash, err := s.plugin.GetConfigHash(s.pluginContext)
if err != nil {
return nil, nil, err
}

labels := map[string]string{
starboard.LabelResourceKind: s.object.GetObjectKind().GroupVersionKind().Kind,
starboard.LabelResourceName: s.object.GetName(),
starboard.LabelResourceNamespace: s.object.GetNamespace(),
starboard.LabelPodSpecHash: podSpecHash,
starboard.LabelPluginConfigHash: pluginConfigHash,
starboard.LabelConfigAuditReportScanner: s.pluginContext.GetName(),
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
}

return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: GetScanJobName(s.object),
Namespace: s.pluginContext.GetNamespace(),
Labels: labels,
},
Spec: batchv1.JobSpec{
BackoffLimit: pointer.Int32Ptr(0),
Completions: pointer.Int32Ptr(1),
ActiveDeadlineSeconds: kube.GetActiveDeadlineSeconds(s.timeout),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: jobSpec,
},
},
}, secrets, nil
}

func GetScanJobName(obj client.Object) string {
return fmt.Sprintf("scan-configauditreport-%s", kube.ComputeHash(kube.Object{
Kind: kube.Kind(obj.GetObjectKind().GroupVersionKind().Kind),
Namespace: obj.GetNamespace(),
Name: obj.GetName(),
}))
}

type Builder interface {
Controller(controller metav1.Object) Builder
PodSpecHash(hash string) Builder
57 changes: 17 additions & 40 deletions pkg/configauditreport/scanner.go
@@ -5,17 +5,13 @@ import (
"fmt"

"github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1"
"github.com/aquasecurity/starboard/pkg/ext"
"github.com/aquasecurity/starboard/pkg/kube"
"github.com/aquasecurity/starboard/pkg/runner"
"github.com/aquasecurity/starboard/pkg/starboard"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -27,7 +23,6 @@ type Scanner struct {
logsReader kube.LogsReader
plugin Plugin
pluginContext starboard.PluginContext
ext.IDGenerator
}

func NewScanner(
@@ -45,7 +40,6 @@ func NewScanner(
pluginContext: pluginContext,
objectResolver: &kube.ObjectResolver{Client: client},
logsReader: kube.NewLogsReader(clientset),
IDGenerator: ext.NewGoogleUUIDGenerator(),
}
}

@@ -58,7 +52,12 @@ func (s *Scanner) Scan(ctx context.Context, workload kube.Object) (v1alpha1.Conf
}

klog.V(3).Infof("Scanning with options: %+v", s.opts)
job, secrets, err := s.getScanJob(owner)
job, secrets, err := NewScanJob().
WithPlugin(s.plugin).
WithPluginContext(s.pluginContext).
WithTimeout(s.opts.ScanJobTimeout).
WithObject(owner).
Get()
if err != nil {
return v1alpha1.ConfigAuditReport{}, err
}
@@ -94,41 +93,19 @@ func (s *Scanner) Scan(ctx context.Context, workload kube.Object) (v1alpha1.Conf
_ = logsStream.Close()
}()

podSpecHash, ok := job.Labels[starboard.LabelPodSpecHash]
if !ok {
return v1alpha1.ConfigAuditReport{}, fmt.Errorf("expected label %s not set", starboard.LabelPodSpecHash)
}
pluginConfigHash, ok := job.Labels[starboard.LabelPluginConfigHash]
if !ok {
return v1alpha1.ConfigAuditReport{}, fmt.Errorf("expected label %s not set", starboard.LabelPluginConfigHash)
}

return NewBuilder(s.scheme).
Controller(owner).
PodSpecHash(podSpecHash).
PluginConfigHash(pluginConfigHash).
Result(result).
Get()
}

func (s *Scanner) getScanJob(obj client.Object) (*batchv1.Job, []*corev1.Secret, error) {
jobSpec, secrets, err := s.plugin.GetScanJobSpec(s.pluginContext, obj)
if err != nil {
return nil, nil, err
}
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: s.GenerateID(),
Namespace: starboard.NamespaceName,
Labels: map[string]string{
starboard.LabelResourceKind: obj.GetObjectKind().GroupVersionKind().Kind,
starboard.LabelResourceName: obj.GetName(),
starboard.LabelResourceNamespace: obj.GetNamespace(),
},
},
Spec: batchv1.JobSpec{
BackoffLimit: pointer.Int32Ptr(0),
Completions: pointer.Int32Ptr(1),
ActiveDeadlineSeconds: kube.GetActiveDeadlineSeconds(s.opts.ScanJobTimeout),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
starboard.LabelResourceKind: obj.GetObjectKind().GroupVersionKind().Kind,
starboard.LabelResourceName: obj.GetName(),
starboard.LabelResourceNamespace: obj.GetNamespace(),
},
},
Spec: jobSpec,
},
},
}, secrets, nil
}
16 changes: 8 additions & 8 deletions pkg/operator/controller/ciskubebenchreport.go
@@ -163,10 +163,10 @@ func (r *CISKubeBenchReportReconciler) newScanJob(node *corev1.Node) (*batchv1.J
Name: r.getScanJobName(node),
Namespace: r.Config.Namespace,
Labels: labels.Set{
starboard.LabelResourceKind: string(kube.KindNode),
starboard.LabelResourceName: node.Name,
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelKubeBenchReportScan: "true",
starboard.LabelResourceKind: string(kube.KindNode),
starboard.LabelResourceName: node.Name,
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
starboard.LabelKubeBenchReportScanner: "true",
},
},
Spec: batchv1.JobSpec{
@@ -176,10 +176,10 @@
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels.Set{
starboard.LabelResourceKind: string(kube.KindNode),
starboard.LabelResourceName: node.Name,
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelKubeBenchReportScan: "true",
starboard.LabelResourceKind: string(kube.KindNode),
starboard.LabelResourceName: node.Name,
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
starboard.LabelKubeBenchReportScanner: "true",
},
},
Spec: templateSpec,
66 changes: 9 additions & 57 deletions pkg/operator/controller/configauditreport.go
@@ -17,7 +17,6 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -139,7 +138,7 @@ func (r *ConfigAuditReportReconciler) reconcileWorkload(workloadKind kube.Kind)
}

log.V(1).Info("Checking whether configuration audit has been scheduled")
_, job, err := r.hasActiveScanJob(ctx, workloadPartial, podSpecHash)
_, job, err := r.hasActiveScanJob(ctx, workloadObj, podSpecHash)
if err != nil {
return ctrl.Result{}, err
}
@@ -160,7 +159,12 @@
return ctrl.Result{RequeueAfter: r.Config.ScanJobRetryAfter}, nil
}

job, secrets, err := r.getScanJob(workloadPartial, workloadObj, podSpecHash)
job, secrets, err := configauditreport.NewScanJob().
WithPlugin(r.Plugin).
WithPluginContext(r.PluginContext).
WithTimeout(r.Config.ScanJobTimeout).
WithObject(workloadObj).
Get()
if err != nil {
return ctrl.Result{}, err
}
@@ -211,8 +215,8 @@ func (r *ConfigAuditReportReconciler) hasReport(ctx context.Context, owner kube.
return false, nil
}

func (r *ConfigAuditReportReconciler) hasActiveScanJob(ctx context.Context, owner kube.Object, hash string) (bool, *batchv1.Job, error) {
jobName := r.getScanJobName(owner)
func (r *ConfigAuditReportReconciler) hasActiveScanJob(ctx context.Context, obj client.Object, hash string) (bool, *batchv1.Job, error) {
jobName := configauditreport.GetScanJobName(obj)
job := &batchv1.Job{}
err := r.Client.Get(ctx, client.ObjectKey{Namespace: r.Config.Namespace, Name: jobName}, job)
if err != nil {
@@ -227,58 +231,6 @@ func (r *ConfigAuditReportReconciler) hasActiveScanJob(ctx context.Context, owne
return false, nil, nil
}

func (r *ConfigAuditReportReconciler) getScanJobName(workload kube.Object) string {
return fmt.Sprintf("scan-configauditreport-%s", kube.ComputeHash(workload))
}

func (r *ConfigAuditReportReconciler) getScanJob(workload kube.Object, obj client.Object, podSpecHash string) (*batchv1.Job, []*corev1.Secret, error) {
jobSpec, secrets, err := r.Plugin.GetScanJobSpec(r.PluginContext, obj)

if err != nil {
return nil, nil, err
}

configHash, err := r.Plugin.GetConfigHash(r.PluginContext)
if err != nil {
return nil, nil, err
}

return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: r.getScanJobName(workload),
Namespace: r.Config.Namespace,
Labels: map[string]string{
starboard.LabelResourceKind: string(workload.Kind),
starboard.LabelResourceName: workload.Name,
starboard.LabelResourceNamespace: workload.Namespace,
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelPodSpecHash: podSpecHash,
starboard.LabelPluginConfigHash: configHash,
starboard.LabelConfigAuditReportScan: "true",
},
},
Spec: batchv1.JobSpec{
BackoffLimit: pointer.Int32Ptr(0),
Completions: pointer.Int32Ptr(1),
ActiveDeadlineSeconds: kube.GetActiveDeadlineSeconds(r.Config.ScanJobTimeout),
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
starboard.LabelResourceKind: string(workload.Kind),
starboard.LabelResourceName: workload.Name,
starboard.LabelResourceNamespace: workload.Namespace,
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelPodSpecHash: podSpecHash,
starboard.LabelPluginConfigHash: configHash,
starboard.LabelConfigAuditReportScan: "true",
},
},
Spec: jobSpec,
},
},
}, secrets, nil
}

func (r *ConfigAuditReportReconciler) reconcileJobs() reconcile.Func {
return func(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := r.Logger.WithValues("job", req.NamespacedName)
2 changes: 1 addition & 1 deletion pkg/operator/controller/limit_checker.go
@@ -37,7 +37,7 @@ func (c *checker) Check(ctx context.Context) (bool, int, error) {
func (c *checker) countScanJobs(ctx context.Context) (int, error) {
var scanJobs batchv1.JobList
err := c.client.List(ctx, &scanJobs, client.MatchingLabels{
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
}, client.InNamespace(c.config.Namespace))
if err != nil {
return 0, err
8 changes: 4 additions & 4 deletions pkg/operator/controller/limit_checker_test.go
@@ -34,21 +34,21 @@ var _ = Describe("LimitChecker", func() {
Name: "scan-vulnerabilityreport-hash1",
Namespace: "starboard-operator",
Labels: map[string]string{
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
},
}},
&batchv1.Job{ObjectMeta: metav1.ObjectMeta{
Name: "scan-vulnerabilityreport-hash2",
Namespace: "starboard-operator",
Labels: map[string]string{
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
},
}},
&batchv1.Job{ObjectMeta: metav1.ObjectMeta{
Name: "scan-configauditreport-hash2",
Namespace: "starboard-operator",
Labels: map[string]string{
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
},
}},
).Build()
@@ -74,7 +74,7 @@ var _ = Describe("LimitChecker", func() {
Name: "scan-vulnerabilityreport-hash1",
Namespace: "starboard-operator",
Labels: map[string]string{
starboard.LabelK8SAppManagedBy: starboard.AppStarboardOperator,
starboard.LabelK8SAppManagedBy: starboard.AppStarboard,
},
}},
).Build()
