
Commit 0bc1b3b
added logic for cli
Yuvraj committed Aug 31, 2020
1 parent 2243728 commit 0bc1b3b
Showing 13 changed files with 140 additions and 65 deletions.
1 change: 1 addition & 0 deletions cmd/kyverno/main.go
@@ -346,6 +346,7 @@ func main() {
go statusSync.Run(1, stopCh)
go pCacheController.Run(1, stopCh)
go auditHandler.Run(10, stopCh)
go jobController.Run(1,stopCh)
openAPISync.Run(1, stopCh)

// verifies if the admission control is enabled and active
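For context, jobController is started here the same way as the other controllers in main.go: Run(workers, stopCh) fans out worker goroutines and blocks until the stop channel closes. A minimal sketch of that standard client-go pattern, assuming the usual shape (the Run body is not part of this commit, and runWorker is hypothetical):

```go
import (
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
)

// Run starts `workers` goroutines that each drain the controller's work
// queue until stopCh is closed.
func (j *Job) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	for i := 0; i < workers; i++ {
		go wait.Until(j.runWorker, time.Second, stopCh) // runWorker: hypothetical queue-drain loop
	}
	<-stopCh
}
```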
4 changes: 2 additions & 2 deletions definitions/install.yaml
@@ -1144,6 +1144,7 @@ rules:
- networkpolicies
- secrets
- configmaps
- jobs
- resourcequotas
- limitranges
- clusterroles
@@ -1510,7 +1511,6 @@ spec:
- args:
- --filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]
- -v=2
- --policyreport=policyreport
env:
- name: INIT_CONFIG
value: init-config
@@ -1520,7 +1520,7 @@
fieldPath: metadata.namespace
- name: KYVERNO_SVC
value: kyverno-svc
image: evalsocket/kyverno:v1.1.10-25-g3ebf9d43fc6c
image: nirmata/kyverno:v1.1.10
imagePullPolicy: Always
livenessProbe:
failureThreshold: 4
1 change: 1 addition & 0 deletions definitions/install_debug.yaml
@@ -1144,6 +1144,7 @@ rules:
- networkpolicies
- secrets
- configmaps
- jobs
- resourcequotas
- limitranges
- clusterroles
1 change: 1 addition & 0 deletions definitions/k8s-resource/rbac.yaml
@@ -221,6 +221,7 @@ rules:
- networkpolicies
- secrets
- configmaps
- jobs
- resourcequotas
- limitranges
- clusterroles
8 changes: 8 additions & 0 deletions go.mod
@@ -4,6 +4,11 @@ go 1.13

require (
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/evanphx/json-patch v4.5.0+incompatible
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/go-logr/logr v0.1.0
@@ -16,8 +21,11 @@ require (
github.com/julienschmidt/httprouter v1.3.0
github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a
github.com/minio/minio v0.0.0-20200114012931-30922148fbb5
github.com/morikuni/aec v1.0.0 // indirect
github.com/onsi/ginkgo v1.11.0
github.com/onsi/gomega v1.8.1
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.0.0
github.com/stretchr/testify v1.4.0
48 changes: 39 additions & 9 deletions pkg/jobs/controller.go
@@ -6,8 +6,11 @@ import (
v1 "k8s.io/api/batch/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"reflect"
"strings"
"sync"
"time"

"github.com/go-logr/logr"

@@ -222,7 +225,7 @@ func (j *Job) syncNamespace(wg *sync.WaitGroup, jobType, scope, policy string) {
}()
var args []string
var mode string
if len(policy) > 0 {
if policy == "POLICY" {
mode = "cli"
} else {
mode = "configmap"
@@ -236,50 +239,77 @@ func (j *Job) syncNamespace(wg *sync.WaitGroup, jobType, scope, policy string) {
"helm",
fmt.Sprintf("--mode=%s", mode),
}
job = CreateJob(append(args, "helm"), jobType, scope)
job = CreateJob(args, jobType, scope)
break
case "NAMESPACE":
args = []string{
"report",
"namespace",
fmt.Sprintf("--mode=%s", mode),
}
job = CreateJob(append(args, "namespace"), jobType, scope)
job = CreateJob(args, jobType, scope)
break
case "CLUSTER":
args = []string{
"report",
"cluster",
fmt.Sprintf("--mode=%s", mode),
}
job = CreateJob(append(args, "cluster"), jobType, scope)
job = CreateJob(args, jobType, scope)
break
}
_, err := j.dclient.UpdateStatusResource("", "Job", config.KubePolicyNamespace, job, false)
_, err := j.dclient.CreateResource("", "Job", config.KubePolicyNamespace, job, false)
if err != nil {
return
}
deadline := time.Now().Add(15 * time.Second)
var failure bool
for {
resource, err := j.dclient.GetResource("", "Job", config.KubePolicyNamespace, job.GetName())
if err != nil {
continue
}
job := v1.Job{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(resource.UnstructuredContent(), &job); err != nil {
failure = true
break
}
if job.Status.Active == 0 || time.Now().After(deadline) {
failure = true
break
}
}
if failure {
err := j.dclient.DeleteResource("", "Job", config.KubePolicyNamespace, job.GetName(),false)
if err != nil {
return
}
}
return
}
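The wait loop above spins on GetResource without sleeping between attempts, and it flags failure whenever Active drops to 0, which is also the state of a Job that finished cleanly. A hedged alternative sketch using apimachinery's polling helper; it assumes the surrounding package's Job type, dclient, and config, and reads the terminal state from the status counters instead:

```go
// waitForJob polls once per second, for up to 15 seconds, until the Job
// reports a terminal state (a sketch, not the code this commit ships).
func (j *Job) waitForJob(name string) error {
	return wait.PollImmediate(time.Second, 15*time.Second, func() (bool, error) {
		resource, err := j.dclient.GetResource("", "Job", config.KubePolicyNamespace, name)
		if err != nil {
			return false, nil // Job not visible yet; keep polling
		}
		var job v1.Job
		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(resource.UnstructuredContent(), &job); err != nil {
			return false, err
		}
		// Terminal once the Job records succeeded or failed pods,
		// rather than merely having no active pods.
		return job.Status.Succeeded > 0 || job.Status.Failed > 0, nil
	})
}
```

(wait here is k8s.io/apimachinery/pkg/util/wait; v1 is the batch/v1 alias already imported in this file.)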

func CreateJob(args []string, jobType, scope string) *v1.Job {
return &v1.Job{
job := &v1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", jobType, scope),
Namespace: config.KubePolicyNamespace,
},
Spec: v1.JobSpec{
Template: apiv1.PodTemplateSpec{
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{
{
Name: fmt.Sprintf("%s-%s", jobType, scope),
Image: "nirmata/kyverno-cli:latest",
Name: strings.ToLower(fmt.Sprintf("%s-%s", jobType, scope)),
Image: "evalsocket/kyverno-cli:latest",
ImagePullPolicy: "Always",
Args: args,

},
},
RestartPolicy: "OnFailure",
},
},
},
}
job.SetGenerateName("kyverno-policyreport-")
return job
}
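One detail worth flagging: CreateJob sets a fixed ObjectMeta.Name and then calls SetGenerateName, but the API server only honours generateName when metadata.name is empty, so the kyverno-policyreport- prefix never yields a unique suffix here. A sketch of the variant that does, using the same aliases as this file:

```go
job := &v1.Job{
	ObjectMeta: metav1.ObjectMeta{
		// Leave Name unset so the API server appends a random suffix,
		// e.g. kyverno-policyreport-x7k2p.
		GenerateName: "kyverno-policyreport-",
		Namespace:    config.KubePolicyNamespace,
	},
	// Spec as above.
}
```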
2 changes: 1 addition & 1 deletion pkg/kyverno/report/cluster.go
@@ -23,7 +23,7 @@ func ClusterCommand() *cobra.Command {
var wg sync.WaitGroup
wg.Add(1)
if mode == "cli" {
go createEngineRespone("", "CLUSTER", &wg, restConfig)
go configmapScan("", "CLUSTER", &wg, restConfig)
wg.Wait()
return nil
}
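For orientation, the --mode flag driving this branch is plumbed in through cobra (a sketch only: the flag name is inferred from the --mode=%s argument built in pkg/jobs/controller.go, and the real command constructs restConfig itself):

```go
func clusterCommand(restConfig *rest.Config) *cobra.Command {
	var mode string
	cmd := &cobra.Command{
		Use: "cluster",
		RunE: func(cmd *cobra.Command, args []string) error {
			var wg sync.WaitGroup
			wg.Add(1)
			if mode == "cli" {
				go configmapScan("", "CLUSTER", &wg, restConfig)
			} else {
				go backgroundScan("", "CLUSTER", &wg, restConfig)
			}
			wg.Wait()
			return nil
		},
	}
	cmd.Flags().StringVar(&mode, "mode", "cli", "scan source: cli or configmap")
	return cmd
}
```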
48 changes: 31 additions & 17 deletions pkg/kyverno/report/common.go
@@ -1,8 +1,8 @@
package report

import (
"encoding/json"
"fmt"
"encoding/json"
kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
policyreportv1alpha1 "github.com/nirmata/kyverno/pkg/api/policyreport/v1alpha1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@@ -15,9 +15,12 @@ import (
"github.com/nirmata/kyverno/pkg/policyreport"
"github.com/nirmata/kyverno/pkg/utils"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/rest"
"os"
@@ -34,7 +37,7 @@ const (
Cluster string = "Cluster"
)

func createEngineRespone(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
defer func() {
wg.Done()
}()
@@ -73,14 +76,16 @@ func createEngineRespone(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
os.Exit(1)
}
} else {
cpolicies = &kyvernov1.ClusterPolicyList{}
policies, err := kclient.KyvernoV1().Policies(n).List(metav1.ListOptions{})
if err != nil {
os.Exit(1)
}
for _, p := range policies.Items {
cp := policy.ConvertPolicyToClusterPolicy(&p)
cpolicies.Items = append(cpolicies.Items, *cp)
}
if err != nil {
os.Exit(1)
}

}

// key uid
@@ -267,7 +272,7 @@ func createEngineRespone(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
// Create Policy Report
}

func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
func configmapScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
defer func() {
wg.Done()
}()
@@ -291,31 +296,38 @@ func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {

_ = kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)

configmap, err := dClient.GetResource("", "Configmap", config.KubePolicyNamespace, "kyverno-event")
configmap, err := dClient.GetResource("", "ConfigMap", config.KubePolicyNamespace, "kyverno-event")
if err != nil {

os.Exit(1)
}

genData, _, err := unstructured.NestedMap(configmap.Object, "data")
if err != nil {
var job *v1.ConfigMap
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configmap.UnstructuredContent(), &job); err != nil {
os.Exit(1)
}
jsonString, _ := json.Marshal(genData)
events := policyreport.PVEvent{}
json.Unmarshal(jsonString, &events)
var response map[string][]policyreport.Info
var data []policyreport.Info
if scope == Cluster {
data = events.Cluster
if err := json.Unmarshal([]byte(job.Data["Namespace"]), &response); err != nil {
log.Log.Error(err,"")
}
data = response["cluster"]
} else if scope == Helm {
data = events.Helm[n]
if err := json.Unmarshal([]byte(job.Data["Helm"]), &response); err != nil {
log.Log.Error(err,"")
}
data = response[n]
} else {
data = events.Namespace[n]
if err := json.Unmarshal([]byte(job.Data["Namespace"]), &response); err != nil {
log.Log.Error(err,"")
}
data = response[n]
}
var results map[string][]policyreportv1alpha1.PolicyReportResult

var ns []string
for _, v := range data {
for _, r := range v.Rules {
log.Log.Error(nil, "failed to get resource","",r)
builder := policyreport.NewPrBuilder()
pv := builder.Generate(v)
result := &policyreportv1alpha1.PolicyReportResult{
@@ -356,6 +368,8 @@ func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
}
}
}


for k, _ := range results {
if scope == Helm || scope == Namespace {
str := strings.Split(k, "-")
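As rewritten, configmapScan deserializes whole data keys from the kyverno-event ConfigMap rather than walking a nested map: each key holds a JSON object mapping a scope key (a namespace name, or "cluster") to a list of policyreport.Info records. A hypothetical fixture showing that assumed layout (key names taken from the diff; the payloads are invented for illustration):

```go
cm := &corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "kyverno-event",
		Namespace: config.KubePolicyNamespace,
	},
	Data: map[string]string{
		// Read when scope == Helm: data = response[<namespace>].
		"Helm": `{"default": []}`,
		// Read for both the Cluster scope (response["cluster"]) and the
		// Namespace scope (response[<namespace>]).
		"Namespace": `{"cluster": [], "default": []}`,
	},
}
```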
27 changes: 14 additions & 13 deletions pkg/kyverno/report/helm.go
@@ -34,20 +34,21 @@ func HelmCommand() *cobra.Command {
}

kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)

ns, err := kubeInformer.Core().V1().Namespaces().Lister().List(labels.Everything())
if err != nil {
os.Exit(1)
}
var wg sync.WaitGroup
wg.Add(len(ns))
for _, n := range ns {
if mode == "cli" {
go createEngineRespone(n.GetName(), "HELM", &wg, restConfig)
wg.Wait()
return nil
if mode == "cli" {
ns, err := kubeInformer.Core().V1().Namespaces().Lister().List(labels.Everything())
if err != nil {
os.Exit(1)
}
var wg sync.WaitGroup
wg.Add(len(ns))
for _, n := range ns {
go configmapScan(n.GetName(), "Helm", &wg, restConfig)
}
go backgroundScan(n.GetName(), "HELM", &wg, restConfig)
wg.Wait()
}else{
var wg sync.WaitGroup
wg.Add(1)
go backgroundScan("", "Helm", &wg, restConfig)
wg.Wait()
return nil
}
