remove debug lines
matmerr committed Oct 21, 2022
1 parent ae9d23b commit be1a818
Showing 13 changed files with 33 additions and 68 deletions.
6 changes: 0 additions & 6 deletions pkg/connectivity/comparisontable.go
@@ -44,12 +44,6 @@ func NewComparisonTable(items []string) *ComparisonTable {
}

func NewComparisonTableFrom(kubeProbe *probe.Table, simulatedProbe *probe.Table) *ComparisonTable {
if kubeProbe == nil {
panic("kubeprobe is nil")
}
if simulatedProbe == nil {
panic("sim probe is nil")
}
if len(kubeProbe.Wrapped.Froms) != len(simulatedProbe.Wrapped.Froms) || len(kubeProbe.Wrapped.Tos) != len(simulatedProbe.Wrapped.Tos) {
panic(errors.Errorf("cannot compare tables of different dimensions"))
}
4 changes: 0 additions & 4 deletions pkg/connectivity/interpreter.go
@@ -2,7 +2,6 @@ package connectivity

import (
"fmt"
"log"
"time"

"github.com/mattfenwick/cyclonus/pkg/connectivity/probe"
@@ -119,7 +118,6 @@ func (t *Interpreter) ExecuteTestCase(testCase *generator.TestCase) *Result {
} else if action.DeletePod != nil {
err = testCaseState.DeletePod(action.DeletePod.Namespace, action.DeletePod.Pod)
} else if action.CreateService != nil {
log.Printf("creating service %+v", action.CreateService)
err = testCaseState.CreateService(action.CreateService.Service)
} else if action.DeleteService != nil {
err = testCaseState.DeleteService(action.DeleteService.Service)
@@ -154,10 +152,8 @@ func (t *Interpreter) runProbe(testCaseState *TestCaseState, probeConfig *genera
parsedPolicy,
append([]*networkingv1.NetworkPolicy{}, testCaseState.Policies...)) // this looks weird, but just making a new copy to avoid accidentally mutating it elsewhere

logrus.Debug("before running kube probe")
for i := 0; i <= t.Config.KubeProbeRetries; i++ {
logrus.Infof("running kube probe on try %d", i+1)
logrus.Debugf("running new kube probe on try %d", i+1)
stepResult.AddKubeProbe(t.kubeRunner.RunProbeForConfig(probeConfig, testCaseState.Resources))
// no differences between synthetic and kube probes? then we can stop
if stepResult.LastComparison().ValueCounts(t.Config.IgnoreLoopback)[DifferentComparison] == 0 {
12 changes: 1 addition & 11 deletions pkg/connectivity/probe/jobbuilder.go
@@ -3,7 +3,6 @@ package probe
import (
"github.com/mattfenwick/cyclonus/pkg/generator"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
@@ -13,7 +12,6 @@ type JobBuilder struct {
}

func (j *JobBuilder) GetJobsForProbeConfig(resources *Resources, config *generator.ProbeConfig) *Jobs {
logrus.Debugf("getting jobs for probe config %v", config)
if config.AllAvailable {
return j.GetJobsAllAvailableServers(resources, config.Mode)
} else if config.Mode == generator.ProbeModeNodeIP {
@@ -27,8 +25,6 @@

func (j *JobBuilder) GetJobsForNodeIP(resources *Resources, config *generator.ProbeConfig) *Jobs {
jobs := &Jobs{}
logrus.Debugf("getting jobs for node ip %v", config)

for _, podFrom := range resources.Pods {
for _, node := range resources.Nodes {
job := &Job{
@@ -51,7 +47,7 @@
TimeoutSeconds: j.TimeoutSeconds,
}
jobs.Valid = append(jobs.Valid, job)

}
}

@@ -60,11 +56,8 @@ func (j *JobBuilder) GetJobsForNodeIP(resources *Resources, config *generator.Pr

func (j *JobBuilder) GetJobsForNamedPortProtocol(resources *Resources, port intstr.IntOrString, protocol v1.Protocol, mode generator.ProbeMode) *Jobs {
jobs := &Jobs{}
logrus.Debugf("named port getting jobs for resources %+v", resources)
for _, podFrom := range resources.Pods {
logrus.Debugf("named port getting jobs for podfrom %+v", podFrom)
for _, podTo := range resources.Pods {
logrus.Debugf("named port getting jobs for podTo %+v", podTo)
job := &Job{
FromKey: podFrom.PodString().String(),
FromNamespace: podFrom.Namespace,
@@ -116,11 +109,8 @@ func (j *JobBuilder) GetJobsForNamedPortProtocol(resources *Resources, port ints

func (j *JobBuilder) GetJobsAllAvailableServers(resources *Resources, mode generator.ProbeMode) *Jobs {
var jobs []*Job
logrus.Debugf("all available getting jobs for resources %+v", resources)
for _, podFrom := range resources.Pods {
logrus.Debugf("all available getting jobs for podfrom %+v", podFrom)
for _, podTo := range resources.Pods {
logrus.Debugf("all available getting jobs for podTo %+v", podTo)
for _, contTo := range podTo.Containers {
jobs = append(jobs, &Job{
FromKey: podFrom.PodString().String(),
15 changes: 2 additions & 13 deletions pkg/connectivity/probe/jobrunner.go
@@ -29,24 +29,17 @@ func NewKubeBatchRunner(kubernetes kube.IKubernetes, workers int, jobBuilder *Jo
}

func (p *Runner) RunProbeForConfig(probeConfig *generator.ProbeConfig, resources *Resources) *Table {
jobs := p.JobBuilder.GetJobsForProbeConfig(resources, probeConfig)
logrus.Debugf("got jobs %+v", jobs)
jobresults := p.runProbe(jobs)
jobresults := p.runProbe(p.JobBuilder.GetJobsForProbeConfig(resources, probeConfig))
if probeConfig.Mode == generator.ProbeModeNodeIP {
return NewNodeTableFromJobResults(resources, jobresults)
return NewNodeTableFromJobResults(resources, jobresults) // TODO: table for nodes?
} else {
return NewPodTableFromJobResults(resources, jobresults)
}
}

func (p *Runner) runProbe(jobs *Jobs) []*JobResult {
logrus.Debugf("running probe for job %+v", jobs)
resultSlice := p.JobRunner.RunJobs(jobs.Valid)

for _, res := range resultSlice {
logrus.Debugf("resultslice combined: %+v, ingress: %+v, egress %+v", res.Combined, res.Ingress, res.Egress)
}

invalidPP := ConnectivityInvalidPortProtocol
unknown := ConnectivityUnknown
for _, j := range jobs.BadPortProtocol {
@@ -113,7 +106,6 @@ type KubeJobRunner struct {
}

func (k *KubeJobRunner) RunJobs(jobs []*Job) []*JobResult {
logrus.Debugf("run job single with %+v", jobs)
size := len(jobs)
jobsChan := make(chan *Job, size)
resultsChan := make(chan *JobResult, size)
@@ -137,7 +129,6 @@ func (k *KubeJobRunner) RunJobs(jobs []*Job) []*JobResult {
// probeWorker continues polling a pod connectivity status, until the incoming "jobs" channel is closed, and writes results back out to the "results" channel.
// it only writes pass/fail status to a channel and has no failure side effects; this is by design, since we do not want to fail inside a goroutine.
func (k *KubeJobRunner) worker(jobs <-chan *Job, results chan<- *JobResult) {
logrus.Debugf("running probe worker")
for job := range jobs {
logrus.Debugf("probing connectivity for job %+v", job)
connectivity, _ := probeConnectivity(k.Kubernetes, job)
@@ -150,7 +141,6 @@ func (k *KubeJobRunner) worker(jobs <-chan *Job, results chan<- *JobResult) {

func probeConnectivity(k8s kube.IKubernetes, job *Job) (Connectivity, string) {
commandDebugString := strings.Join(job.KubeExecCommand(), " ")
logrus.Debugf("probe connectivity")
stdout, stderr, commandErr, err := k8s.ExecuteRemoteCommand(job.FromNamespace, job.FromPod, job.FromContainer, job.ClientCommand())
logrus.Debugf("stdout, stderr from [%s]: \n%s\n%s", commandDebugString, stdout, stderr)
if err != nil {
@@ -174,7 +164,6 @@ func NewKubeBatchJobRunner(k8s kube.IKubernetes, workers int) *KubeBatchJobRunne
}

func (k *KubeBatchJobRunner) RunJobs(jobs []*Job) []*JobResult {
logrus.Debugf("run job batch")
jobMap := map[string]*Job{}

// 1. batch up jobs
4 changes: 2 additions & 2 deletions pkg/connectivity/probe/pod.go
@@ -125,7 +125,7 @@ func (p *Pod) KubeService() *v1.Service {
}
}

func (p *Pod) KubeServiceLoadBalancer() *v1.Service {
func (p *Pod) KubeServiceNodePort() *v1.Service {
ports := slice.Map(func(cont *Container) v1.ServicePort { return cont.KubeServicePort() }, p.Containers)
tcpPorts := slice.Filter(func(port v1.ServicePort) bool { return port.Protocol == v1.ProtocolTCP }, ports)
return &v1.Service{
@@ -136,7 +136,7 @@ func (p *Pod) KubeServiceLoadBalancer() *v1.Service {
Spec: v1.ServiceSpec{
Ports: tcpPorts,
Selector: p.Labels,
Type: v1.ServiceTypeLoadBalancer,
Type: v1.ServiceTypeNodePort,
},
}
}
2 changes: 1 addition & 1 deletion pkg/connectivity/probe/resources.go
@@ -333,7 +333,7 @@ func (r *Resources) CreateResourcesInKube(kubernetes kube.IKubernetes) error {
}
}
kubeService := pod.KubeService()
kubeServiceLoadBalancer := pod.KubeServiceLoadBalancer()
kubeServiceLoadBalancer := pod.KubeServiceNodePort()
_, err = kubernetes.GetService(kubeService.Namespace, kubeService.Name)
if err != nil {
_, err = kubernetes.CreateService(kubeService)
2 changes: 0 additions & 2 deletions pkg/connectivity/stepresult.go
@@ -3,7 +3,6 @@ package connectivity
import (
"github.com/mattfenwick/cyclonus/pkg/connectivity/probe"
"github.com/mattfenwick/cyclonus/pkg/matcher"
"github.com/sirupsen/logrus"
networkingv1 "k8s.io/api/networking/v1"
)

@@ -30,7 +29,6 @@ func (s *StepResult) AddKubeProbe(kubeProbe *probe.Table) {

func (s *StepResult) Comparison(i int) *ComparisonTable {
if s.comparisons[i] == nil {
logrus.Debugf("comparing i [%d] in kubeprobes %+v", i, s.KubeProbes)
s.comparisons[i] = NewComparisonTableFrom(s.KubeProbes[i], s.SimulatedProbe)
}
return s.comparisons[i]
3 changes: 1 addition & 2 deletions pkg/connectivity/testcasestate.go
@@ -121,8 +121,7 @@ func (t *TestCaseState) CreatePod(ns string, pod string, labels map[string]strin
return err
}

fmt.Println("creating lb svc")
_, err = t.Kubernetes.CreateService(newPod.KubeServiceLoadBalancer())
_, err = t.Kubernetes.CreateService(newPod.KubeServiceNodePort())
if err != nil {
return err
}
@@ -2,12 +2,11 @@ package generator

import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)

func (t *TestCaseGenerator) LoadBalancerTestCase() []*TestCase {
func (t *TestCaseGenerator) NodePortTestCases() []*TestCase {
// TODO: simplify this struct passed to CreateService
svc1 := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
@@ -20,7 +19,7 @@ func (t *TestCaseGenerator) LoadBalancerTestCase() []*TestCase {
{
Protocol: v1.ProtocolTCP,
Port: 81,
NodePort: 32086,
NodePort: 32088,
},
},
Selector: map[string]string{"pod": "a"},
@@ -30,22 +29,23 @@ func (t *TestCaseGenerator) LoadBalancerTestCase() []*TestCase {
AllAvailable: false,
PortProtocol: &PortProtocol{
Protocol: v1.ProtocolTCP,
Port: intstr.FromInt(32086),
Port: intstr.FromInt(32088),
},
Mode: ProbeModeNodeIP,
}
denyAll := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "deny-all-policy",
Namespace: "x",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{
networkingv1.PolicyTypeIngress,
/*
denyAll := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "deny-all-policy",
Namespace: "x",
},
},
}
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{
networkingv1.PolicyTypeIngress,
},
},
}*/

return []*TestCase{
NewTestCase("should allow access to nodeport with no netpols applied",
@@ -54,11 +54,13 @@ func (t *TestCaseGenerator) LoadBalancerTestCase() []*TestCase {
CreateService(svc1), // TODO: add service reset, without removing core test services
),
),
NewTestCase("should deny access to nodeport with netpols applied",
NewStringSet(TagLoadBalancer),
NewTestStep(probe,
CreatePolicy(denyAll),
/*
NewTestCase("should deny access to nodeport with netpols applied",
NewStringSet(TagLoadBalancer),
NewTestStep(probe,
CreatePolicy(denyAll),
),
),
),
*/
}
}
3 changes: 1 addition & 2 deletions pkg/generator/testcase.go
@@ -142,8 +142,7 @@ func ParseProbeMode(mode string) (ProbeMode, error) {
}

// ProbeConfig: exactly one field must be non-null (or, in AllAvailable's case, non-false). This
//
// models a discriminated union (sum type).
// models a discriminated union (sum type).
type ProbeConfig struct {
AllAvailable bool
PortProtocol *PortProtocol
2 changes: 1 addition & 1 deletion pkg/generator/testcasegenerator.go
@@ -73,7 +73,7 @@ func (t *TestCaseGenerator) GenerateAllTestCases() []*TestCase {
t.ConflictTestCases(),
t.NamespaceTestCases(),
t.UpstreamE2ETestCases(),
t.LoadBalancerTestCase())
t.NodePortTestCases())
}

func (t *TestCaseGenerator) GenerateTestCases() []*TestCase {
4 changes: 2 additions & 2 deletions pkg/generator/testcasegenerator_tests.go
@@ -19,8 +19,8 @@ func RunTestCaseGeneratorTests() {
Expect(len(gen.PortProtocolTestCases())).To(Equal(70))
Expect(len(gen.ConflictTestCases())).To(Equal(16))
Expect(len(gen.NamespaceTestCases())).To(Equal(2))

Expect(len(gen.GenerateTestCases())).To(Equal(230))
Expect(len(gen.NodePortTestCases())).To(Equal(1))
Expect(len(gen.GenerateTestCases())).To(Equal(231))
})
})
}
2 changes: 0 additions & 2 deletions pkg/worker/worker.go
@@ -2,7 +2,6 @@ package worker

import (
"encoding/json"
"log"
"os/exec"

"github.com/pkg/errors"
@@ -71,7 +70,6 @@ func IssueRequestWithRetries(r *Request, retries int) *Result {

func IssueRequest(r *Request) *Result {
command := r.Command()
log.Printf("running command: %v", command)
name, args := command[0], command[1:]
cmd := exec.Command(name, args...)
out, err := cmd.Output()
