
se0g1/cve-2018-1002101: test/e2e/scalability/density.go; 367 LoC #8

Open

githubvet opened this issue Jan 9, 2021 · 0 comments

Found a possible issue in se0g1/cve-2018-1002101 at test/e2e/scalability/density.go

Below is the message reported by the analyzer for this snippet of code. Note that the analyzer only reports the first issue it finds, so please do not limit your consideration to the contents of the message below.

The 367 lines of Go which triggered the analyzer are shown below; for the original context, see test/e2e/scalability/density.go at the commit ID noted at the end of this report.
for _, testArg := range densityTests {
	feature := "ManualPerformance"
	switch testArg.podsPerNode {
	case 30:
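		// Note: &testArg takes the address of the range variable testArg; per the
		// analyzer output below, this is the reference passed directly to isCanonical.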
		if isCanonical(&testArg) {
			feature = "Performance"
		}
	case 95:
		feature = "HighDensityPerformance"
	}

	name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps and %v daemons",
		feature,
		testArg.podsPerNode,
		testArg.kind,
		testArg.secretsPerPod,
		testArg.configMapsPerPod,
		testArg.daemonsPerNode,
	)
	if testArg.quotas {
		name += " with quotas"
	}
	itArg := testArg
	It(name, func() {
		nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation")
		defer nodePrepPhase.End()
		nodePreparer := framework.NewE2ETestNodePreparer(
			f.ClientSet,
			[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
		)
		framework.ExpectNoError(nodePreparer.PrepareNodes())
		defer nodePreparer.CleanupNodes()

		podsPerNode := itArg.podsPerNode
		if podsPerNode == 30 {
			f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
		}
		totalPods = (podsPerNode - itArg.daemonsPerNode) * nodeCount
		fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
		framework.ExpectNoError(err)
		defer fileHndl.Close()
		nodePrepPhase.End()

		// nodeCountPerNamespace and CreateNamespaces are defined in load.go
		numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
		namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
		framework.ExpectNoError(err)
		if itArg.quotas {
			framework.ExpectNoError(CreateQuotas(f, namespaces, totalPods+nodeCount, testPhaseDurations.StartPhase(210, "quota creation")))
		}

		configs := make([]testutils.RunObjectConfig, numberOfCollections)
		secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
		configMapConfigs := make([]*testutils.ConfigMapConfig, 0, numberOfCollections*itArg.configMapsPerPod)
		// Since all RCs are created at the same time, timeout for each config
		// has to assume that it will be run at the very end.
		podThroughput := 20
		timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
		// createClients is defined in load.go
		clients, internalClients, scalesClients, err := createClients(numberOfCollections)
		for i := 0; i < numberOfCollections; i++ {
			nsName := namespaces[i].Name
			secretNames := []string{}
			for j := 0; j < itArg.secretsPerPod; j++ {
				secretName := fmt.Sprintf("density-secret-%v-%v", i, j)
				secretConfigs = append(secretConfigs, &testutils.SecretConfig{
					Content:   map[string]string{"foo": "bar"},
					Client:    clients[i],
					Name:      secretName,
					Namespace: nsName,
					LogFunc:   framework.Logf,
				})
				secretNames = append(secretNames, secretName)
			}
			configMapNames := []string{}
			for j := 0; j < itArg.configMapsPerPod; j++ {
				configMapName := fmt.Sprintf("density-configmap-%v-%v", i, j)
				configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
					Content:   map[string]string{"foo": "bar"},
					Client:    clients[i],
					Name:      configMapName,
					Namespace: nsName,
					LogFunc:   framework.Logf,
				})
				configMapNames = append(configMapNames, configMapName)
			}
			name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
			baseConfig := &testutils.RCConfig{
				Client:               clients[i],
				InternalClient:       internalClients[i],
				ScalesGetter:         scalesClients[i],
				Image:                framework.GetPauseImageName(f.ClientSet),
				Name:                 name,
				Namespace:            nsName,
				Labels:               map[string]string{"type": "densityPod"},
				PollInterval:         DensityPollInterval,
				Timeout:              timeout,
				PodStatusFile:        fileHndl,
				Replicas:             (totalPods + numberOfCollections - 1) / numberOfCollections,
				CpuRequest:           nodeCpuCapacity / 100,
				MemRequest:           nodeMemCapacity / 100,
				MaxContainerFailures: &MaxContainerFailures,
				Silent:               true,
				LogFunc:              framework.Logf,
				SecretNames:          secretNames,
				ConfigMapNames:       configMapNames,
			}
			switch itArg.kind {
			case api.Kind("ReplicationController"):
				configs[i] = baseConfig
			case extensions.Kind("ReplicaSet"):
				configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
			case extensions.Kind("Deployment"):
				configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
			case batch.Kind("Job"):
				configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
			default:
				framework.Failf("Unsupported kind: %v", itArg.kind)
			}
		}

		// Single client is running out of http2 connections in delete phase, hence we need more.
		clients, internalClients, _, err = createClients(2)

		dConfig := DensityTestConfig{
			ClientSets:         clients,
			InternalClientsets: internalClients,
			Configs:            configs,
			PodCount:           totalPods,
			PollInterval:       DensityPollInterval,
			kind:               itArg.kind,
			SecretConfigs:      secretConfigs,
			ConfigMapConfigs:   configMapConfigs,
		}

		for i := 0; i < itArg.daemonsPerNode; i++ {
			dConfig.DaemonConfigs = append(dConfig.DaemonConfigs,
				&testutils.DaemonConfig{
					Client:    f.ClientSet,
					Name:      fmt.Sprintf("density-daemon-%v", i),
					Namespace: f.Namespace.Name,
					LogFunc:   framework.Logf,
				})
		}
		e2eStartupTime = runDensityTest(dConfig, testPhaseDurations)
		if itArg.runLatencyTest {
			By("Scheduling additional Pods to measure startup latencies")

			createTimes := make(map[string]metav1.Time, 0)
			nodeNames := make(map[string]string, 0)
			scheduleTimes := make(map[string]metav1.Time, 0)
			runTimes := make(map[string]metav1.Time, 0)
			watchTimes := make(map[string]metav1.Time, 0)

			var mutex sync.Mutex
			checkPod := func(p *v1.Pod) {
				mutex.Lock()
				defer mutex.Unlock()
				defer GinkgoRecover()

				if p.Status.Phase == v1.PodRunning {
					if _, found := watchTimes[p.Name]; !found {
						watchTimes[p.Name] = metav1.Now()
						createTimes[p.Name] = p.CreationTimestamp
						nodeNames[p.Name] = p.Spec.NodeName
						var startTime metav1.Time
						for _, cs := range p.Status.ContainerStatuses {
							if cs.State.Running != nil {
								if startTime.Before(&cs.State.Running.StartedAt) {
									startTime = cs.State.Running.StartedAt
								}
							}
						}
						if startTime != metav1.NewTime(time.Time{}) {
							runTimes[p.Name] = startTime
						} else {
							framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
						}
					}
				}
			}

			additionalPodsPrefix = "density-latency-pod"
			stopCh := make(chan struct{})

			latencyPodStores := make([]cache.Store, len(namespaces))
			for i := 0; i < len(namespaces); i++ {
				nsName := namespaces[i].Name
				latencyPodsStore, controller := cache.NewInformer(
					&cache.ListWatch{
						ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
							options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
							obj, err := c.CoreV1().Pods(nsName).List(options)
							return runtime.Object(obj), err
						},
						WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
							options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
							return c.CoreV1().Pods(nsName).Watch(options)
						},
					},
					&v1.Pod{},
					0,
					cache.ResourceEventHandlerFuncs{
						AddFunc: func(obj interface{}) {
							p, ok := obj.(*v1.Pod)
							if !ok {
								framework.Logf("Failed to cast observed object to *v1.Pod.")
							}
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
						UpdateFunc: func(oldObj, newObj interface{}) {
							p, ok := newObj.(*v1.Pod)
							if !ok {
								framework.Logf("Failed to cast observed object to *v1.Pod.")
							}
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
					},
				)
				latencyPodStores[i] = latencyPodsStore

				go controller.Run(stopCh)
			}

			// Create some additional pods with throughput ~5 pods/sec.
			latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation")
			defer latencyPodStartupPhase.End()
			var wg sync.WaitGroup
			wg.Add(nodeCount)
			// Explicitly set requests here.
			// Thanks to it we trigger increasing priority function by scheduling
			// a pod to a node, which in turn will result in spreading latency pods
			// more evenly between nodes.
			cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
			memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
			if podsPerNode > 30 {
				// This is to make them schedulable on high-density tests
				// (e.g. 100 pods/node kubemark).
				cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
				memRequest = *resource.NewQuantity(0, resource.DecimalSI)
			}
			rcNameToNsMap := map[string]string{}
			for i := 1; i <= nodeCount; i++ {
				name := additionalPodsPrefix + "-" + strconv.Itoa(i)
				nsName := namespaces[i%len(namespaces)].Name
				rcNameToNsMap[name] = nsName
				go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
				time.Sleep(200 * time.Millisecond)
			}
			wg.Wait()
			latencyPodStartupPhase.End()

			latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement")
			defer latencyMeasurementPhase.End()
			By("Waiting for all Pods begin observed by the watch...")
			waitTimeout := 10 * time.Minute
			for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
				if time.Since(start) < waitTimeout {
					framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
				}
			}
			close(stopCh)

			nodeToLatencyPods := make(map[string]int)
			for i := range latencyPodStores {
				for _, item := range latencyPodStores[i].List() {
					pod := item.(*v1.Pod)
					nodeToLatencyPods[pod.Spec.NodeName]++
				}
				for node, count := range nodeToLatencyPods {
					if count > 1 {
						framework.Logf("%d latency pods scheduled on %s", count, node)
					}
				}
			}

			for i := 0; i < len(namespaces); i++ {
				nsName := namespaces[i].Name
				selector := fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.namespace": nsName,
					"source":                   v1.DefaultSchedulerName,
				}.AsSelector().String()
				options := metav1.ListOptions{FieldSelector: selector}
				schedEvents, err := c.CoreV1().Events(nsName).List(options)
				framework.ExpectNoError(err)
				for k := range createTimes {
					for _, event := range schedEvents.Items {
						if event.InvolvedObject.Name == k {
							scheduleTimes[k] = event.FirstTimestamp
							break
						}
					}
				}
			}

			scheduleLag := make([]framework.PodLatencyData, 0)
			startupLag := make([]framework.PodLatencyData, 0)
			watchLag := make([]framework.PodLatencyData, 0)
			schedToWatchLag := make([]framework.PodLatencyData, 0)
			e2eLag := make([]framework.PodLatencyData, 0)

			for name, create := range createTimes {
				sched, ok := scheduleTimes[name]
				if !ok {
					framework.Logf("Failed to find schedule time for %v", name)
					missingMeasurements++
				}
				run, ok := runTimes[name]
				if !ok {
					framework.Logf("Failed to find run time for %v", name)
					missingMeasurements++
				}
				watch, ok := watchTimes[name]
				if !ok {
					framework.Logf("Failed to find watch time for %v", name)
					missingMeasurements++
				}
				node, ok := nodeNames[name]
				if !ok {
					framework.Logf("Failed to find node for %v", name)
					missingMeasurements++
				}

				scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
				startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
				watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
				schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
				e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
			}

			sort.Sort(framework.LatencySlice(scheduleLag))
			sort.Sort(framework.LatencySlice(startupLag))
			sort.Sort(framework.LatencySlice(watchLag))
			sort.Sort(framework.LatencySlice(schedToWatchLag))
			sort.Sort(framework.LatencySlice(e2eLag))

			framework.PrintLatencies(scheduleLag, "worst schedule latencies")
			framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
			framework.PrintLatencies(watchLag, "worst watch latencies")
			framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
			framework.PrintLatencies(e2eLag, "worst e2e total latencies")

			// Test whether e2e pod startup time is acceptable.
			podStartupLatency := &framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
			f.TestSummaries = append(f.TestSummaries, podStartupLatency)
			framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))

			framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
			latencyMeasurementPhase.End()

			By("Removing additional replication controllers")
			podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion")
			defer podDeletionPhase.End()
			deleteRC := func(i int) {
				defer GinkgoRecover()
				name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
				framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
			}
			workqueue.Parallelize(25, nodeCount, deleteRC)
			podDeletionPhase.End()
		}
		cleanupDensityTest(dConfig, testPhaseDurations)
	})
}

Extra information the analyzer produced:
No path was found through the callgraph that could lead to a function which writes a pointer argument.

No path was found through the callgraph that could lead to a function which passes a pointer to third-party code.

root signature {isCanonical 1} was not found in the callgraph; reference was passed directly to third-party code
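
For context, here is a minimal self-contained sketch (hypothetical names, not taken from this repository) of the pattern the analyzer looks for, namely taking the address of a range loop variable, together with the copy-before-capture mitigation that the snippet above already applies for its closure via itArg := testArg:

// pattern.go: illustrates the reference-to-range-variable pattern (hypothetical
// names). Before Go 1.22 the range variable is a single variable reused on
// every iteration, so a retained &arg ends up pointing at the last element.
package main

import "fmt"

type testArg struct {
	podsPerNode int
}

func main() {
	args := []testArg{{podsPerNode: 30}, {podsPerNode: 95}, {podsPerNode: 100}}

	// Flagged pattern: the pointer may outlive the iteration that produced it.
	var retained []*testArg
	for _, arg := range args {
		retained = append(retained, &arg)
	}
	for _, p := range retained {
		fmt.Println(p.podsPerNode) // pre-Go 1.22: prints 100 three times
	}

	// Mitigation used in the snippet above (itArg := testArg): shadow the
	// range variable with a per-iteration copy before taking its address
	// or capturing it in a closure.
	var safe []*testArg
	for _, arg := range args {
		arg := arg
		safe = append(safe, &arg)
	}
	for _, p := range safe {
		fmt.Println(p.podsPerNode) // prints 30, 95, 100
	}
}

In the flagged snippet, the pointer produced by &testArg is only passed synchronously to isCanonical and is not retained across iterations, which may be relevant when choosing between the classifications below.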

Leave a reaction on this issue to contribute to the project by classifying this instance as a Bug 👎, Mitigated 👍, or Desirable Behavior 🚀.
See the project's descriptions of these classifications for more information.

commit ID: 08eb5fb80339f2738e62ab25142bde16debd4a60
