Below is the message reported by the analyzer for this snippet of code. Beware that the analyzer only reports the first issue it finds, so please do not limit your consideration to the contents of the below message.
The following 367 lines of Go triggered the analyzer:
for _, testArg := range densityTests {
feature := "ManualPerformance"
switch testArg.podsPerNode {
case 30:
if isCanonical(&testArg) {
feature = "Performance"
}
case 95:
feature = "HighDensityPerformance"
}
name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets, %v configmaps and %v daemons",
feature,
testArg.podsPerNode,
testArg.kind,
testArg.secretsPerPod,
testArg.configMapsPerPod,
testArg.daemonsPerNode,
)
if testArg.quotas {
name += " with quotas"
}
itArg := testArg
It(name, func() {
nodePrepPhase := testPhaseDurations.StartPhase(100, "node preparation")
defer nodePrepPhase.End()
nodePreparer := framework.NewE2ETestNodePreparer(
f.ClientSet,
[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
)
framework.ExpectNoError(nodePreparer.PrepareNodes())
defer nodePreparer.CleanupNodes()
podsPerNode := itArg.podsPerNode
if podsPerNode == 30 {
f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
}
totalPods = (podsPerNode - itArg.daemonsPerNode) * nodeCount
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
nodePrepPhase.End()
// nodeCountPerNamespace and CreateNamespaces are defined in load.go
numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode), testPhaseDurations.StartPhase(200, "namespace creation"))
framework.ExpectNoError(err)
if itArg.quotas {
framework.ExpectNoError(CreateQuotas(f, namespaces, totalPods+nodeCount, testPhaseDurations.StartPhase(210, "quota creation")))
}
configs := make([]testutils.RunObjectConfig, numberOfCollections)
secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
configMapConfigs := make([]*testutils.ConfigMapConfig, 0, numberOfCollections*itArg.configMapsPerPod)
// Since all RCs are created at the same time, timeout for each config
// has to assume that it will be run at the very end.
podThroughput := 20
timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
// createClients is defined in load.go
clients, internalClients, scalesClients, err := createClients(numberOfCollections)
for i := 0; i < numberOfCollections; i++ {
nsName := namespaces[i].Name
secretNames := []string{}
for j := 0; j < itArg.secretsPerPod; j++ {
secretName := fmt.Sprintf("density-secret-%v-%v", i, j)
secretConfigs = append(secretConfigs, &testutils.SecretConfig{
Content: map[string]string{"foo": "bar"},
Client: clients[i],
Name: secretName,
Namespace: nsName,
LogFunc: framework.Logf,
})
secretNames = append(secretNames, secretName)
}
configMapNames := []string{}
for j := 0; j < itArg.configMapsPerPod; j++ {
configMapName := fmt.Sprintf("density-configmap-%v-%v", i, j)
configMapConfigs = append(configMapConfigs, &testutils.ConfigMapConfig{
Content: map[string]string{"foo": "bar"},
Client: clients[i],
Name: configMapName,
Namespace: nsName,
LogFunc: framework.Logf,
})
configMapNames = append(configMapNames, configMapName)
}
name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
baseConfig := &testutils.RCConfig{
Client: clients[i],
InternalClient: internalClients[i],
ScalesGetter: scalesClients[i],
Image: framework.GetPauseImageName(f.ClientSet),
Name: name,
Namespace: nsName,
Labels: map[string]string{"type": "densityPod"},
PollInterval: DensityPollInterval,
Timeout: timeout,
PodStatusFile: fileHndl,
Replicas: (totalPods + numberOfCollections - 1) / numberOfCollections,
CpuRequest: nodeCpuCapacity/100,
MemRequest: nodeMemCapacity/100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
LogFunc: framework.Logf,
SecretNames: secretNames,
ConfigMapNames: configMapNames,
}
switch itArg.kind {
case api.Kind("ReplicationController"):
configs[i] = baseConfig
case extensions.Kind("ReplicaSet"):
configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
case extensions.Kind("Deployment"):
configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
case batch.Kind("Job"):
configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
default:
framework.Failf("Unsupported kind: %v", itArg.kind)
}
}
// Single client is running out of http2 connections in delete phase, hence we need more.
clients, internalClients, _, err = createClients(2)
dConfig := DensityTestConfig{
ClientSets: clients,
InternalClientsets: internalClients,
Configs: configs,
PodCount: totalPods,
PollInterval: DensityPollInterval,
kind: itArg.kind,
SecretConfigs: secretConfigs,
ConfigMapConfigs: configMapConfigs,
}
for i := 0; i < itArg.daemonsPerNode; i++ {
dConfig.DaemonConfigs = append(dConfig.DaemonConfigs,
&testutils.DaemonConfig{
Client: f.ClientSet,
Name: fmt.Sprintf("density-daemon-%v", i),
Namespace: f.Namespace.Name,
LogFunc: framework.Logf,
})
}
e2eStartupTime = runDensityTest(dConfig, testPhaseDurations)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
createTimes := make(map[string]metav1.Time, 0)
nodeNames := make(map[string]string, 0)
scheduleTimes := make(map[string]metav1.Time, 0)
runTimes := make(map[string]metav1.Time, 0)
watchTimes := make(map[string]metav1.Time, 0)
var mutex sync.Mutex
checkPod := func(p *v1.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
if p.Status.Phase == v1.PodRunning {
if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = metav1.Now()
createTimes[p.Name] = p.CreationTimestamp
nodeNames[p.Name] = p.Spec.NodeName
var startTime metav1.Time
for _, cs := range p.Status.ContainerStatuses {
if cs.State.Running != nil {
if startTime.Before(&cs.State.Running.StartedAt) {
startTime = cs.State.Running.StartedAt
}
}
}
if startTime != metav1.NewTime(time.Time{}) {
runTimes[p.Name] = startTime
} else {
framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
}
}
}
}
additionalPodsPrefix = "density-latency-pod"
stopCh := make(chan struct{})
latencyPodStores := make([]cache.Store, len(namespaces))
for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
obj, err := c.CoreV1().Pods(nsName).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
return c.CoreV1().Pods(nsName).Watch(options)
},
},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*v1.Pod)
if !ok {
framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*v1.Pod)
if !ok {
framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
},
},
)
latencyPodStores[i] = latencyPodsStore
go controller.Run(stopCh)
}
// Create some additional pods with throughput ~5 pods/sec.
latencyPodStartupPhase := testPhaseDurations.StartPhase(800, "latency pods creation")
defer latencyPodStartupPhase.End()
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// Thanks to it we trigger increasing priority function by scheduling
// a pod to a node, which in turn will result in spreading latency pods
// more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
rcNameToNsMap := map[string]string{}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
latencyPodStartupPhase.End()
latencyMeasurementPhase := testPhaseDurations.StartPhase(810, "pod startup latencies measurement")
defer latencyMeasurementPhase.End()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) > waitTimeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}
close(stopCh)
nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
}
for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": nsName,
"source": v1.DefaultSchedulerName,
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
schedEvents, err := c.CoreV1().Events(nsName).List(options)
framework.ExpectNoError(err)
for k := range createTimes {
for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k {
scheduleTimes[k] = event.FirstTimestamp
break
}
}
}
}
scheduleLag := make([]framework.PodLatencyData, 0)
startupLag := make([]framework.PodLatencyData, 0)
watchLag := make([]framework.PodLatencyData, 0)
schedToWatchLag := make([]framework.PodLatencyData, 0)
e2eLag := make([]framework.PodLatencyData, 0)
for name, create := range createTimes {
sched, ok := scheduleTimes[name]
if !ok {
framework.Logf("Failed to find schedule time for %v", name)
missingMeasurements++
}
run, ok := runTimes[name]
if !ok {
framework.Logf("Failed to find run time for %v", name)
missingMeasurements++
}
watch, ok := watchTimes[name]
if !ok {
framework.Logf("Failed to find watch time for %v", name)
missingMeasurements++
}
node, ok := nodeNames[name]
if !ok {
framework.Logf("Failed to find node for %v", name)
missingMeasurements++
}
scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
}
sort.Sort(framework.LatencySlice(scheduleLag))
sort.Sort(framework.LatencySlice(startupLag))
sort.Sort(framework.LatencySlice(watchLag))
sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(framework.LatencySlice(e2eLag))
framework.PrintLatencies(scheduleLag, "worst schedule latencies")
framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
framework.PrintLatencies(watchLag, "worst watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
framework.PrintLatencies(e2eLag, "worst e2e total latencies")
// Test whether e2e pod startup time is acceptable.
podStartupLatency := &framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
f.TestSummaries = append(f.TestSummaries, podStartupLatency)
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
latencyMeasurementPhase.End()
By("Removing additional replication controllers")
podDeletionPhase := testPhaseDurations.StartPhase(820, "latency pods deletion")
defer podDeletionPhase.End()
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.Parallelize(25, nodeCount, deleteRC)
podDeletionPhase.End()
}
cleanupDensityTest(dConfig, testPhaseDurations)
})
}
Extra information produced by the analyzer:
No path was found through the callgraph that could lead to a function which writes a pointer argument.
No path was found through the callgraph that could lead to a function which passes a pointer to third-party code.
root signature {isCanonical 1} was not found in the callgraph; reference was passed directly to third-party code
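For context, the root signature above corresponds to the isCanonical(&testArg) call at the top of the loop, which passes the address of the range loop variable to another function. Below is a minimal, self-contained sketch of that general pattern; the exampleTest and inspect names are hypothetical stand-ins, not code from density.go or the analyzer. It only illustrates why a retained pointer to a range variable can alias across iterations on Go versions before 1.22, not that density.go actually misuses it (the analyzer's own output suggests the pointer is never written through).

package main

import "fmt"

type exampleTest struct {
	podsPerNode int
}

// inspect stands in for a callee like isCanonical that receives a pointer
// to the caller's range variable; here it simply retains the pointer so the
// aliasing is observable.
func inspect(t *exampleTest) *exampleTest { return t }

func main() {
	tests := []exampleTest{{podsPerNode: 30}, {podsPerNode: 95}}
	var saved []*exampleTest
	for _, testArg := range tests {
		// &testArg is the address of the loop variable, not of the slice element.
		saved = append(saved, inspect(&testArg))
	}
	for _, p := range saved {
		// On Go versions before 1.22 both pointers alias the same variable,
		// so this prints the last value (95) twice; from Go 1.22 on, each
		// iteration has its own variable and it prints 30 then 95.
		fmt.Println(p.podsPerNode)
	}
}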
Leave a reaction on this issue to contribute to the project by classifying this instance as a Bug 👎, Mitigated 👍, or Desirable Behavior 🚀
See the descriptions of the classifications here for more information.
Found a possible issue in se0g1/cve-2018-1002101 at test/e2e/scalability/density.go
commit ID: 08eb5fb80339f2738e62ab25142bde16debd4a60