Commit

minor cleanup
matmerr committed Mar 9, 2023
1 parent ee65f12 commit a5b04d0
Showing 10 changed files with 30 additions and 53 deletions.
4 changes: 2 additions & 2 deletions pkg/connectivity/comparisontable.go

@@ -45,10 +45,10 @@ func NewComparisonTable(items []string) *ComparisonTable {
 
 func NewComparisonTableFrom(kubeProbe *probe.Table, simulatedProbe *probe.Table) *ComparisonTable {
     if kubeProbe == nil {
-        panic("kubeprobe is nil")
+        panic(errors.Errorf("kubeprobe is nil"))
     }
     if simulatedProbe == nil {
-        panic("sim probe is nil")
+        panic(errors.Errorf("sim probe is nil"))
     }
     if len(kubeProbe.Wrapped.Froms) != len(simulatedProbe.Wrapped.Froms) || len(kubeProbe.Wrapped.Tos) != len(simulatedProbe.Wrapped.Tos) {
         panic(errors.Errorf("cannot compare tables of different dimensions"))

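The rewritten panics match the errors.Errorf style already used by the dimension check below; a likely motivation is that github.com/pkg/errors values capture a stack trace at creation, which %+v can print after a recover, whereas a plain string panic carries only its message. A minimal standalone sketch of that difference (illustrative only, not code from this repository):

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

func main() {
    defer func() {
        if r := recover(); r != nil {
            // panic("...") would carry only the message; a pkg/errors value
            // also records the stack where it was created, shown by %+v
            fmt.Printf("recovered: %+v\n", r)
        }
    }()
    panic(errors.Errorf("kubeprobe is nil"))
}
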
4 changes: 0 additions & 4 deletions pkg/connectivity/interpreter.go

@@ -2,7 +2,6 @@ package connectivity
 
 import (
     "fmt"
-    "log"
     "time"
 
     "github.com/mattfenwick/cyclonus/pkg/connectivity/probe"
@@ -119,7 +118,6 @@ func (t *Interpreter) ExecuteTestCase(testCase *generator.TestCase) *Result {
     } else if action.DeletePod != nil {
         err = testCaseState.DeletePod(action.DeletePod.Namespace, action.DeletePod.Pod)
     } else if action.CreateService != nil {
-        log.Printf("creating service %+v", action.CreateService)
         err = testCaseState.CreateService(action.CreateService.Service)
     } else if action.DeleteService != nil {
         err = testCaseState.DeleteService(action.DeleteService.Service)
@@ -154,10 +152,8 @@ func (t *Interpreter) runProbe(testCaseState *TestCaseState, probeConfig *genera
         parsedPolicy,
         append([]*networkingv1.NetworkPolicy{}, testCaseState.Policies...)) // this looks weird, but just making a new copy to avoid accidentally mutating it elsewhere
 
-    logrus.Debug("before running kube probe")
     for i := 0; i <= t.Config.KubeProbeRetries; i++ {
         logrus.Infof("running kube probe on try %d", i+1)
-        logrus.Debugf("running new kube probe on try %d", i+1)
         stepResult.AddKubeProbe(t.kubeRunner.RunProbeForConfig(probeConfig, testCaseState.Resources))
         // no differences between synthetic and kube probes? then we can stop
         if stepResult.LastComparison().ValueCounts(t.Config.IgnoreLoopback)[DifferentComparison] == 0 {

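The loop touched by the last hunk retries the kube probe up to KubeProbeRetries extra times and exits early once the kube and simulated results agree. Its control flow, reduced to a standalone sketch (the names here are hypothetical, not the package's own):

// retryUntil runs work up to retries+1 times and stops early the first
// time done reports convergence; it returns whether convergence was reached.
func retryUntil(retries int, work func() (done bool)) bool {
    for i := 0; i <= retries; i++ {
        if work() {
            return true
        }
    }
    return false
}
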
3 changes: 0 additions & 3 deletions pkg/connectivity/probe/jobrunner.go

@@ -137,7 +137,6 @@ func (k *KubeJobRunner) RunJobs(jobs []*Job) []*JobResult {
 // probeWorker continues polling a pod connectivity status, until the incoming "jobs" channel is closed, and writes results back out to the "results" channel.
 // it only writes pass/fail status to a channel and has no failure side effects, this is by design since we do not want to fail inside a goroutine.
 func (k *KubeJobRunner) worker(jobs <-chan *Job, results chan<- *JobResult) {
-    logrus.Debugf("running probe worker")
     for job := range jobs {
         logrus.Debugf("probing connectivity for job %+v", job)
         connectivity, _ := probeConnectivity(k.Kubernetes, job)
@@ -150,7 +149,6 @@ func (k *KubeJobRunner) worker(jobs <-chan *Job, results chan<- *JobResult) {
 
 func probeConnectivity(k8s kube.IKubernetes, job *Job) (Connectivity, string) {
     commandDebugString := strings.Join(job.KubeExecCommand(), " ")
-    logrus.Debugf("probe connectivity")
     stdout, stderr, commandErr, err := k8s.ExecuteRemoteCommand(job.FromNamespace, job.FromPod, job.FromContainer, job.ClientCommand())
     logrus.Debugf("stdout, stderr from [%s]: \n%s\n%s", commandDebugString, stdout, stderr)
     if err != nil {
@@ -174,7 +172,6 @@ func NewKubeBatchJobRunner(k8s kube.IKubernetes, workers int) *KubeBatchJobRunne
 }
 
 func (k *KubeBatchJobRunner) RunJobs(jobs []*Job) []*JobResult {
-    logrus.Debugf("run job batch")
     jobMap := map[string]*Job{}
 
     // 1. batch up jobs

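The probeWorker doc comment describes a conventional Go worker pool: each worker drains a shared jobs channel until it is closed and reports over a results channel, deliberately doing nothing fatal inside the goroutine. A self-contained sketch of that shape (types, counts, and names are illustrative, not this package's own):

package main

import (
    "fmt"
    "sync"
)

type Job struct{ ID int }
type JobResult struct {
    ID     int
    Passed bool
}

// worker exits when jobs is closed; it only reports status, never fails.
func worker(jobs <-chan Job, results chan<- JobResult) {
    for job := range jobs {
        results <- JobResult{ID: job.ID, Passed: true}
    }
}

func main() {
    const numJobs = 5
    jobs := make(chan Job)
    results := make(chan JobResult, numJobs) // buffered so workers never block

    var wg sync.WaitGroup
    for w := 0; w < 3; w++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            worker(jobs, results)
        }()
    }

    for i := 0; i < numJobs; i++ {
        jobs <- Job{ID: i}
    }
    close(jobs) // lets each worker's range loop end
    wg.Wait()
    close(results)

    for r := range results {
        fmt.Printf("job %d passed=%v\n", r.ID, r.Passed)
    }
}
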
14 changes: 0 additions & 14 deletions pkg/connectivity/probe/resources_test.go

@@ -51,20 +51,6 @@ func RunResourcesTests() {
         Expect(r2.Pods[0].Labels).To(Equal(map[string]string{}))
     })
 
-    It("Should set pod labels nondestructively", func() {
-        labels := map[string]string{"pod": "b"}
-        r := &Resources{
-            Namespaces: map[string]map[string]string{
-                "y": {},
-            },
-            Pods: []*Pod{{Namespace: "y", Name: "b", Labels: labels}},
-        }
-        r2, err := r.SetPodLabels("y", "b", map[string]string{})
-        Expect(err).To(Succeed())
-
-        Expect(r.Pods[0].Labels).To(Equal(labels))
-        Expect(r2.Pods[0].Labels).To(Equal(map[string]string{}))
-    })
     It("Should create a service nondestructively", func() {
         r := &Resources{
             Services: make(map[string]*v1.Service),

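The deleted It block repeats the SetPodLabels assertions already visible in the context just above it, so this appears to be duplicate-test removal. The property both copies check is a nondestructive (copy-on-write) update: the method hands back a modified copy and leaves the receiver's labels untouched. A simplified sketch of that pattern (types reduced and hypothetical, not the package's Resources API):

type Pod struct {
    Namespace string
    Name      string
    Labels    map[string]string
}

// WithLabels returns a copy of p carrying labels; p itself is unchanged.
// The map is copied too, so neither pod aliases the caller's map.
func (p Pod) WithLabels(labels map[string]string) Pod {
    copied := make(map[string]string, len(labels))
    for k, v := range labels {
        copied[k] = v
    }
    p.Labels = copied // p is a value copy; the original is untouched
    return p
}
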
1 change: 1 addition & 0 deletions pkg/connectivity/probe/table.go

@@ -50,6 +50,7 @@ func NewPodTableFromJobResults(resources *Resources, jobResults []*JobResult) *T
     return table
 }
 
+// Note: The UX here needs some work to accurately portray pod->node matrix
 func NewNodeTableFromJobResults(resources *Resources, jobResults []*JobResult) *Table {
     res := append(resources.SortedNodeNames(), resources.SortedPodNames()...)
     logrus.Debugf("merged table %+v", res)

2 changes: 0 additions & 2 deletions pkg/connectivity/testcasestate.go

@@ -115,13 +115,11 @@ func (t *TestCaseState) CreatePod(ns string, pod string, labels map[string]strin
     if err != nil {
         return err
     }
-    fmt.Println("creating cluster svc")
     _, err = t.Kubernetes.CreateService(newPod.KubeService())
     if err != nil {
         return err
     }
 
-    fmt.Println("creating lb svc")
     _, err = t.Kubernetes.CreateService(newPod.KubeServiceLoadBalancer())
     if err != nil {
         return err

3 changes: 3 additions & 0 deletions pkg/generator/loadbalancertestcases.go

@@ -46,6 +46,9 @@ func (t *TestCaseGenerator) LoadBalancerTestCase() []*TestCase {
         },
     }
 
+    // these test cases are currently nonblocking for the suite,
+    // but there is still work needed to measure the success/failure of the
+    // probe in the node+pod table
     return []*TestCase{
         NewTestCase("should allow access to nodeport with no netpols applied",
             NewStringSet(TagLoadBalancer),

1 change: 0 additions & 1 deletion pkg/generator/testcase.go

@@ -142,7 +142,6 @@ func ParseProbeMode(mode string) (ProbeMode, error) {
 }
 
 // ProbeConfig: exactly one field must be non-null (or, in AllAvailable's case, non-false). This
-//
 // models a discriminated union (sum type).
 type ProbeConfig struct {
     AllAvailable bool

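The repaired doc comment describes a common Go stand-in for a sum type: one struct whose fields are the union's branches, with the invariant that exactly one branch is set. A hedged sketch of that convention with a validity check (only AllAvailable appears in the diff; the second branch here is hypothetical):

package main

import "fmt"

type PortProtocol struct {
    Port     int
    Protocol string
}

// ProbeConfig-style union: exactly one branch must be set
// (non-false for the bool, non-nil for pointer branches).
type ProbeConfig struct {
    AllAvailable bool
    PortProtocol *PortProtocol // hypothetical second branch
}

func (c *ProbeConfig) Validate() error {
    set := 0
    if c.AllAvailable {
        set++
    }
    if c.PortProtocol != nil {
        set++
    }
    if set != 1 {
        return fmt.Errorf("expected exactly 1 field set, found %d", set)
    }
    return nil
}

func main() {
    fmt.Println((&ProbeConfig{AllAvailable: true}).Validate()) // <nil>
    fmt.Println((&ProbeConfig{}).Validate())                   // error
}
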
49 changes: 24 additions & 25 deletions pkg/generator/testcasegenerator.go

@@ -5,31 +5,30 @@ TODO
 Test cases:
 1 policy with ingress:
-  - empty ingress
-  - ingress with 1 rule
-    - empty
-    - 1 port
-      - empty
-      - protocol
-      - port
-      - port + protocol
-      - 2 ports
-    - 1 from
-      - 8 combos: (nil + nil => might mean ipblock must be non-nil)
-        - pod sel: nil, empty, non-empty
-        - ns sel: nil, empty, non-empty
-        - ipblock
-          - no except
-          - yes except
-    - 2 froms
-      - 1 pod/ns, 1 ipblock
-      - 2 pod/ns
-      - 2 ipblocks
-    - 1 port, 1 from
-    - 2 ports, 2 froms
-  - ingress with 2 rules
-  - ingress with 3 rules
+  - empty ingress
+  - ingress with 1 rule
+    - empty
+    - 1 port
+      - empty
+      - protocol
+      - port
+      - port + protocol
+      - 2 ports
+    - 1 from
+      - 8 combos: (nil + nil => might mean ipblock must be non-nil)
+        - pod sel: nil, empty, non-empty
+        - ns sel: nil, empty, non-empty
+        - ipblock
+          - no except
+          - yes except
+    - 2 froms
+      - 1 pod/ns, 1 ipblock
+      - 2 pod/ns
+      - 2 ipblocks
+    - 1 port, 1 from
+    - 2 ports, 2 froms
+  - ingress with 2 rules
+  - ingress with 3 rules
 2 policies with ingress
 1 policy with egress
 2 policies with egress

2 changes: 0 additions & 2 deletions pkg/worker/worker.go

@@ -2,7 +2,6 @@ package worker
 
 import (
     "encoding/json"
-    "log"
     "os/exec"
 
     "github.com/pkg/errors"
@@ -71,7 +70,6 @@ func IssueRequestWithRetries(r *Request, retries int) *Result {
 
 func IssueRequest(r *Request) *Result {
     command := r.Command()
-    log.Printf("running command: %v", command)
     name, args := command[0], command[1:]
     cmd := exec.Command(name, args...)
     out, err := cmd.Output()

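IssueRequest builds an argv with r.Command(), runs it via os/exec, and, judging by the encoding/json import removed alongside "log" above, presumably decodes the command's stdout. The run-and-decode shape as a standalone sketch (the Result field here is hypothetical, not the package's actual type):

package main

import (
    "encoding/json"
    "fmt"
    "os/exec"

    "github.com/pkg/errors"
)

type Result struct {
    Output string `json:"output"`
}

func issue(command []string) (*Result, error) {
    name, args := command[0], command[1:]
    out, err := exec.Command(name, args...).Output() // captures stdout only
    if err != nil {
        return nil, errors.Wrapf(err, "unable to run %v", command)
    }
    var result Result
    if err := json.Unmarshal(out, &result); err != nil {
        return nil, errors.Wrapf(err, "unable to parse %q", string(out))
    }
    return &result, nil
}

func main() {
    result, err := issue([]string{"echo", `{"output":"hi"}`})
    fmt.Println(result, err)
}
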
