-
Notifications
You must be signed in to change notification settings - Fork 197
/
scanner.go
147 lines (133 loc) · 4.19 KB
/
scanner.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
package kubehunter
import (
"context"
"fmt"
"github.com/aquasecurity/starboard/pkg/apis/aquasecurity/v1alpha1"
"github.com/aquasecurity/starboard/pkg/kube"
"github.com/aquasecurity/starboard/pkg/kube/pod"
"github.com/aquasecurity/starboard/pkg/runner"
"github.com/aquasecurity/starboard/pkg/scanners"
"github.com/aquasecurity/starboard/pkg/starboard"
"github.com/google/uuid"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/utils/pointer"
)
const (
// kubeHunterVersion pins the kube-hunter release that the scan Job runs.
kubeHunterVersion = "0.3.1"
// kubeHunterContainerName is the name of the scan container inside the
// Job's Pod; Scan reads this container's logs to obtain the report.
kubeHunterContainerName = "kube-hunter"
)
var (
// kubeHunterContainerImage is the full image reference of the pinned
// kube-hunter release.
kubeHunterContainerImage = fmt.Sprintf("aquasec/kube-hunter:%s", kubeHunterVersion)
)
// Scanner runs kube-hunter scans by scheduling a Kubernetes Job and
// parsing the JSON report from the Job's Pod logs.
type Scanner struct {
// scheme is passed to kube.NewRunnableJob when the scan Job is run.
scheme *runtime.Scheme
// clientset is used to create and delete the scan Job.
clientset kubernetes.Interface
// opts controls scan behavior (job timeout, whether to delete the scan Job).
opts kube.ScannerOpts
// pods retrieves container logs for the Pod controlled by the scan Job.
pods *pod.Manager
}
// NewScanner constructs a Scanner that schedules kube-hunter scan Jobs
// through the given clientset, using opts to control scan behavior.
func NewScanner(
	scheme *runtime.Scheme,
	clientset kubernetes.Interface,
	opts kube.ScannerOpts,
) *Scanner {
	scanner := &Scanner{
		scheme:    scheme,
		clientset: clientset,
		opts:      opts,
		pods:      pod.NewPodManager(clientset),
	}
	return scanner
}
// Scan runs kube-hunter as a one-shot Kubernetes Job, waits for it to
// complete (or fail / time out), and parses the JSON report from the
// kube-hunter container's logs.
//
// Unless opts.DeleteScanJob is disabled, the scan Job is deleted before
// Scan returns; deletion is best effort and failures are only logged.
func (s *Scanner) Scan(ctx context.Context) (v1alpha1.KubeHunterOutput, error) {
	// 1. Prepare descriptor for the Kubernetes Job which will run kube-hunter.
	job := s.prepareKubeHunterJob()
	// 2. Run the prepared Job and wait for its completion or failure.
	err := runner.New().Run(ctx, kube.NewRunnableJob(s.scheme, s.clientset, job))
	if err != nil {
		return v1alpha1.KubeHunterOutput{}, fmt.Errorf("running kube-hunter job: %w", err)
	}
	defer func() {
		if !s.opts.DeleteScanJob {
			klog.V(3).Infof("Skipping scan job deletion: %s/%s", job.Namespace, job.Name)
			return
		}
		// 5. Delete the kube-hunter Job. Best effort: a leftover Job must
		// not mask the scan result, but the failure should be visible.
		klog.V(3).Infof("Deleting job: %s/%s", job.Namespace, job.Name)
		background := metav1.DeletePropagationBackground
		if err := s.clientset.BatchV1().Jobs(job.Namespace).Delete(ctx, job.Name, metav1.DeleteOptions{
			PropagationPolicy: &background,
		}); err != nil {
			klog.Warningf("Deleting job %s/%s: %v", job.Namespace, job.Name, err)
		}
	}()
	// 3. Get kube-hunter JSON output from the kube-hunter Pod.
	klog.V(3).Infof("Getting logs for %s container in job: %s/%s", kubeHunterContainerName,
		job.Namespace, job.Name)
	logsReader, err := s.pods.GetContainerLogsByJob(ctx, job, kubeHunterContainerName)
	if err != nil {
		return v1alpha1.KubeHunterOutput{}, fmt.Errorf("getting logs: %w", err)
	}
	defer func() {
		// Best effort close; the reader has already been consumed by OutputFrom.
		_ = logsReader.Close()
	}()
	// 4. Parse the KubeHunterOutput from the logs Reader.
	report, err := OutputFrom(logsReader)
	if err != nil {
		return v1alpha1.KubeHunterOutput{}, fmt.Errorf("parsing kube hunter report: %w", err)
	}
	return report, nil
}
// prepareKubeHunterJob returns the descriptor of a one-shot Kubernetes Job
// that runs kube-hunter in pod mode with JSON reporting enabled. The Job
// gets a random (UUID) name in the starboard namespace and never retries
// on failure (backoff limit 0).
func (s *Scanner) prepareKubeHunterJob() *batchv1.Job {
	// The scan container: kube-hunter started in pod mode, reporting JSON
	// on stdout so Scan can parse it from the container logs.
	scanContainer := corev1.Container{
		Name:                     kubeHunterContainerName,
		Image:                    kubeHunterContainerImage,
		ImagePullPolicy:          corev1.PullIfNotPresent,
		TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
		Command:                  []string{"python", "kube-hunter.py"},
		Args:                     []string{"--pod", "--report", "json", "--log", "warn"},
		Resources: corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("300m"),
				corev1.ResourceMemory: resource.MustParse("400M"),
			},
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("50m"),
				corev1.ResourceMemory: resource.MustParse("100M"),
			},
		},
	}
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      uuid.New().String(),
			Namespace: starboard.NamespaceName,
			Labels: map[string]string{
				"app": "kube-hunter",
			},
		},
		Spec: batchv1.JobSpec{
			BackoffLimit:          pointer.Int32Ptr(0),
			Completions:           pointer.Int32Ptr(1),
			ActiveDeadlineSeconds: scanners.GetActiveDeadlineSeconds(s.opts.ScanJobTimeout),
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "kube-hunter",
					},
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: starboard.ServiceAccountName,
					RestartPolicy:      corev1.RestartPolicyNever,
					// HostPID lets kube-hunter inspect host processes as
					// part of its hunting (required by pod mode).
					HostPID:    true,
					Containers: []corev1.Container{scanContainer},
				},
			},
		},
	}
}