Skip to content

Commit

Permalink
fix(ScannerCLI): kube-bench and kube-hunter do not respect --delete-scan-job flag (#67)
Browse files Browse the repository at this point in the history

Resolves: #63
Resolves: #61

Signed-off-by: Daniel Pacak <pacak.daniel@gmail.com>
  • Loading branch information
danielpacak committed Jun 23, 2020
1 parent 45a88c3 commit a0f1654
Show file tree
Hide file tree
Showing 4 changed files with 36 additions and 20 deletions.
2 changes: 1 addition & 1 deletion pkg/kubebench/converter.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ func (c *converter) Convert(reader io.Reader) (report starboard.CISKubeBenchOutp
Scanner: starboard.Scanner{
Name: "kube-bench",
Vendor: "Aqua Security",
Version: "latest",
Version: kubeBenchVersion,
},
Sections: []starboard.CISKubeBenchSection{},
}
Expand Down
26 changes: 17 additions & 9 deletions pkg/kubebench/scanner.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,12 @@ import (
)

const (
kubeBenchContainerName = "kube-bench"
kubeBenchContainerImage = "aquasec/kube-bench:latest"
kubeBenchVersion = "0.3.0"
kubeBenchContainerName = "kube-bench"
)

var (
kubeBenchContainerImage = fmt.Sprintf("aquasec/kube-bench:%s", kubeBenchVersion)
)

type Scanner struct {
Expand All @@ -46,34 +50,38 @@ func NewScanner(opts kube.ScannerOpts, clientset kubernetes.Interface) *Scanner

func (s *Scanner) Scan(ctx context.Context) (report starboard.CISKubeBenchOutput, node *core.Node, err error) {
// 1. Prepare descriptor for the Kubernetes Job which will run kube-bench
kubeBenchJob := s.prepareKubeBenchJob()
job := s.prepareKubeBenchJob()

// 2. Run the prepared Job and wait for its completion or failure
err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, kubeBenchJob))
err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, job))
if err != nil {
err = fmt.Errorf("running kube-bench job: %w", err)
return
}

defer func() {
if !s.opts.DeleteScanJob {
klog.V(3).Infof("Skipping scan job deletion: %s/%s", job.Namespace, job.Name)
return
}
// 6. Delete the kube-bench Job
klog.V(3).Infof("Deleting job: %s/%s", kubeBenchJob.Namespace, kubeBenchJob.Name)
klog.V(3).Infof("Deleting job: %s/%s", job.Namespace, job.Name)
background := meta.DeletePropagationBackground
_ = s.clientset.BatchV1().Jobs(kubeBenchJob.Namespace).Delete(ctx, kubeBenchJob.Name, meta.DeleteOptions{
_ = s.clientset.BatchV1().Jobs(job.Namespace).Delete(ctx, job.Name, meta.DeleteOptions{
PropagationPolicy: &background,
})
}()

// 3. Get the Pod controlled by the kube-bench Job
kubeBenchPod, err := s.pods.GetPodByJob(ctx, kubeBenchJob)
kubeBenchPod, err := s.pods.GetPodByJob(ctx, job)
if err != nil {
err = fmt.Errorf("getting kube-bench pod: %w", err)
return
}

// 4. Get kube-bench JSON output from the kube-bench Pod
klog.V(3).Infof("Getting logs for %s container in job: %s/%s", kubeBenchContainerName,
kubeBenchJob.Namespace, kubeBenchJob.Name)
job.Namespace, job.Name)
logsReader, err := s.pods.GetPodLogs(ctx, kubeBenchPod, kubeBenchContainerName)
if err != nil {
err = fmt.Errorf("getting logs: %w", err)
Expand Down Expand Up @@ -162,7 +170,7 @@ func (s *Scanner) prepareKubeBenchJob() *batch.Job {
{
Name: kubeBenchContainerName,
Image: kubeBenchContainerImage,
ImagePullPolicy: core.PullAlways,
ImagePullPolicy: core.PullIfNotPresent,
TerminationMessagePolicy: core.TerminationMessageFallbackToLogsOnError,
Command: []string{"kube-bench"},
Args: []string{"--json"},
Expand Down
2 changes: 1 addition & 1 deletion pkg/kubehunter/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ func OutputFrom(reader io.Reader) (report sec.KubeHunterOutput, err error) {
report.Scanner = sec.Scanner{
Name: "kube-hunter",
Vendor: "Aqua Security",
Version: "latest",
Version: kubeHunterVersion,
}
err = json.NewDecoder(reader).Decode(&report)
return
Expand Down
26 changes: 17 additions & 9 deletions pkg/kubehunter/scanner.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,12 @@ import (
)

const (
kubeHunterContainerName = "kube-hunter"
kubeHunterContainerImage = "aquasec/kube-hunter:latest"
kubeHunterVersion = "0.3.1"
kubeHunterContainerName = "kube-hunter"
)

var (
kubeHunterContainerImage = fmt.Sprintf("aquasec/kube-hunter:%s", kubeHunterVersion)
)

type Scanner struct {
Expand All @@ -43,28 +47,32 @@ func NewScanner(opts kube.ScannerOpts, clientset kubernetes.Interface) *Scanner

func (s *Scanner) Scan(ctx context.Context) (report starboard.KubeHunterOutput, err error) {
// 1. Prepare descriptor for the Kubernetes Job which will run kube-hunter
kubeHunterJob := s.prepareKubeHunterJob()
job := s.prepareKubeHunterJob()

// 2. Run the prepared Job and wait for its completion or failure
err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, kubeHunterJob))
err = runner.New().Run(ctx, kube.NewRunnableJob(s.clientset, job))
if err != nil {
err = fmt.Errorf("running kube-hunter job: %w", err)
return
}

defer func() {
if !s.opts.DeleteScanJob {
klog.V(3).Infof("Skipping scan job deletion: %s/%s", job.Namespace, job.Name)
return
}
// 5. Delete the kube-hunter Job
klog.V(3).Infof("Deleting job: %s/%s", kubeHunterJob.Namespace, kubeHunterJob.Name)
klog.V(3).Infof("Deleting job: %s/%s", job.Namespace, job.Name)
background := meta.DeletePropagationBackground
_ = s.clientset.BatchV1().Jobs(kubeHunterJob.Namespace).Delete(ctx, kubeHunterJob.Name, meta.DeleteOptions{
_ = s.clientset.BatchV1().Jobs(job.Namespace).Delete(ctx, job.Name, meta.DeleteOptions{
PropagationPolicy: &background,
})
}()

// 3. Get kube-hunter JSON output from the kube-hunter Pod
klog.V(3).Infof("Getting logs for %s container in job: %s/%s", kubeHunterContainerName,
kubeHunterJob.Namespace, kubeHunterJob.Name)
logsReader, err := s.pods.GetContainerLogsByJob(ctx, kubeHunterJob, kubeHunterContainerName)
job.Namespace, job.Name)
logsReader, err := s.pods.GetContainerLogsByJob(ctx, job, kubeHunterContainerName)
if err != nil {
err = fmt.Errorf("getting logs: %w", err)
return
Expand Down Expand Up @@ -109,7 +117,7 @@ func (s *Scanner) prepareKubeHunterJob() *batch.Job {
{
Name: kubeHunterContainerName,
Image: kubeHunterContainerImage,
ImagePullPolicy: core.PullAlways,
ImagePullPolicy: core.PullIfNotPresent,
TerminationMessagePolicy: core.TerminationMessageFallbackToLogsOnError,
Command: []string{"python", "kube-hunter.py"},
Args: []string{"--pod", "--report", "json", "--log", "warn"},
Expand Down

0 comments on commit a0f1654

Please sign in to comment.