Merge pull request microsoft#981 from anmaxvl/maksiman/tests/scale-cpu-limits

Add test for ScaleCPULimitsToSandbox runtime config
anmaxvl committed Mar 25, 2021
2 parents 77f39d6 + a83893c commit 10f8422
Showing 5 changed files with 165 additions and 12 deletions.
16 changes: 4 additions & 12 deletions test/cri-containerd/logging_binary_test.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"strings"
 	"testing"
 	"time"
@@ -24,18 +25,9 @@ func Test_Run_Container_With_Binary_Logger(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	logBinaryRoot := os.Getenv("TEST_BINARY_ROOT")
-	if logBinaryRoot == "" {
-		logBinaryRoot = "/ContainerPlat"
-	}
-
-	binaryPath := logBinaryRoot + "/sample-logging-driver.exe"
-
-	if _, err := os.Stat(binaryPath); os.IsNotExist(err) {
-		t.Skip("skipping: sample logging driver missing")
-	}
+	binaryPath := requireBinary(t, "sample-logging-driver.exe")

-	logPath := "binary://" + binaryPath
+	logPath := "binary:///" + binaryPath

 	type config struct {
 		name string
@@ -97,7 +89,7 @@ func Test_Run_Container_With_Binary_Logger(t *testing.T) {
 		podID := runPodSandbox(t, client, ctx, podReq)
 		defer removePodSandbox(t, client, ctx, podID)

-		logFileName := fmt.Sprintf("%s/stdout-%s.txt", logBinaryRoot, test.name)
+		logFileName := fmt.Sprintf(`%s\stdout-%s.txt`, filepath.Dir(binaryPath), test.name)
 		conReq := getCreateContainerRequest(podID, test.containerName, test.containerImage, test.cmd, podReq.Config)
 		conReq.Config.LogPath = logPath + fmt.Sprintf("?%s", logFileName)

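The change above drops the TEST_BINARY_ROOT lookup: the sample logging driver is now resolved next to the compiled test executable via the new requireBinary helper (added in main.go below), and the capture file is passed as the query component of a binary:/// URI. A minimal sketch, not part of the diff, of how that log path is assembled; the example binary path and test name are assumptions for illustration only:

// Minimal sketch (not part of the diff): assembling the binary logger URI the
// way the updated test does. The example path and test name are assumptions.
package main

import (
	"fmt"
	"path/filepath"
)

func binaryLogPath(binaryPath, testName string) string {
	// The capture file lives next to the logging binary and is passed as the
	// query component of the binary:/// URI.
	logFile := fmt.Sprintf(`%s\stdout-%s.txt`, filepath.Dir(binaryPath), testName)
	return "binary:///" + binaryPath + fmt.Sprintf("?%s", logFile)
}

func main() {
	// On Windows this prints:
	// binary:///C:\ContainerPlat\sample-logging-driver.exe?C:\ContainerPlat\stdout-msg.txt
	fmt.Println(binaryLogPath(`C:\ContainerPlat\sample-logging-driver.exe`, "msg"))
}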
27 changes: 27 additions & 0 deletions test/cri-containerd/main.go
@@ -8,6 +8,7 @@ import (
 	"flag"
 	"fmt"
 	"os"
+	"path/filepath"
 	"testing"
 	"time"

@@ -120,14 +121,36 @@ func requireFeatures(t *testing.T, features ...string) {
 	}
 }

+// requireBinary checks if `binary` exists in the same directory as the test
+// binary.
+// Returns full binary path if it exists, otherwise, skips the test.
+func requireBinary(t *testing.T, binary string) string {
+	executable, err := os.Executable()
+	if err != nil {
+		t.Skipf("error locating executable: %s", err)
+		return ""
+	}
+	baseDir := filepath.Dir(executable)
+	binaryPath := filepath.Join(baseDir, binary)
+	if _, err := os.Stat(binaryPath); os.IsNotExist(err) {
+		t.Skipf("binary not found: %s", binaryPath)
+		return ""
+	}
+	return binaryPath
+}
+
 func getWindowsNanoserverImage(build uint16) string {
 	switch build {
 	case osversion.RS5:
 		return "mcr.microsoft.com/windows/nanoserver:1809"
 	case osversion.V19H1:
 		return "mcr.microsoft.com/windows/nanoserver:1903"
+	case osversion.V19H2:
+		return "mcr.microsoft.com/windows/nanoserver:1909"
 	case osversion.V20H1:
 		return "mcr.microsoft.com/windows/nanoserver:2004"
+	case osversion.V20H2:
+		return "mcr.microsoft.com/windows/nanoserver:2009"
 	default:
 		panic("unsupported build")
 	}
@@ -139,8 +162,12 @@ func getWindowsServerCoreImage(build uint16) string {
 		return "mcr.microsoft.com/windows/servercore:1809"
 	case osversion.V19H1:
 		return "mcr.microsoft.com/windows/servercore:1903"
+	case osversion.V19H2:
+		return "mcr.microsoft.com/windows/servercore:1909"
 	case osversion.V20H1:
 		return "mcr.microsoft.com/windows/servercore:2004"
+	case osversion.V20H2:
+		return "mcr.microsoft.com/windows/servercore:2009"
 	default:
 		panic("unsupported build")
 	}
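For reference, a hypothetical caller of the new requireBinary helper in this test package; the binary name "sample-tool.exe" and the test name are made up for illustration and are not part of the commit:

// Hypothetical usage sketch of the requireBinary helper added above; the
// binary name "sample-tool.exe" is illustrative, not a real test dependency.
package cri_containerd

import "testing"

func Test_Example_With_Helper_Binary(t *testing.T) {
	// Skips the test unless sample-tool.exe sits in the same directory as the
	// compiled test executable; otherwise returns its absolute path.
	toolPath := requireBinary(t, "sample-tool.exe")
	t.Logf("helper binary found at %s", toolPath)
}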
72 changes: 72 additions & 0 deletions test/cri-containerd/scale_cpu_limits_to_sandbox_test.go
@@ -0,0 +1,72 @@
package cri_containerd

import (
	"context"
	"math"
	"runtime"
	"strconv"
	"testing"
	"time"

	criruntime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)

const imageWindowsMaxCPUWorkload = "cplatpublic.azurecr.io/golang-1.16.2-nanoserver-1809:max-cpu-workload"

func Test_Scale_CPU_Limits_To_Sandbox(t *testing.T) {
	requireFeatures(t, featureWCOWHypervisor)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	client := newTestRuntimeClient(t)
	podReq := getRunPodSandboxRequest(t, wcowHypervisor17763RuntimeHandler)
	podID := runPodSandbox(t, client, ctx, podReq)
	defer removePodSandbox(t, client, ctx, podID)

	pullRequiredImages(t, []string{imageWindowsMaxCPUWorkload})

	cmd := []string{"cmd", "/c", `C:\load_cpu.exe`}
	contReq := getCreateContainerRequest(podID, "nanoserver-load-cpu", imageWindowsMaxCPUWorkload, cmd, podReq.Config)
	// set the limit to (roughly) 1 processor
	processorLimit := 10000 / runtime.NumCPU()
	contReq.Config.Annotations = map[string]string{
		"io.microsoft.container.processor.limit": strconv.Itoa(processorLimit),
	}

	contID := createContainer(t, client, ctx, contReq)
	defer removeContainer(t, client, ctx, contID)
	startContainer(t, client, ctx, contID)
	defer stopContainer(t, client, ctx, contID)

	statsRequest := &criruntime.ContainerStatsRequest{
		ContainerId: contID,
	}

	// baseline container stats request
	initialResponse, err := client.ContainerStats(ctx, statsRequest)
	if err != nil {
		t.Fatalf("error getting initial container stats: %s", err)
	}

	// give it 5 seconds for a better average; with just 1 second, the measurements
	// are consistently 25-30% higher than expected
	time.Sleep(5 * time.Second)

	// final container stats request
	finalResponse, err := client.ContainerStats(ctx, statsRequest)
	if err != nil {
		t.Fatalf("error getting new container stats: %s", err)
	}

	// Estimate CPU usage by dividing total usage in nanoseconds by time passed in nanoseconds
	oldStats := initialResponse.GetStats().GetCpu()
	newStats := finalResponse.GetStats().GetCpu()
	deltaTime := newStats.GetTimestamp() - oldStats.GetTimestamp()
	deltaUsage := newStats.GetUsageCoreNanoSeconds().GetValue() - oldStats.GetUsageCoreNanoSeconds().GetValue()
	usagePercentage := float64(deltaUsage) / float64(deltaTime) * 100
	t.Logf("container CPU usage percentage: %f", usagePercentage)
	if math.Abs(usagePercentage-100) > 10 {
		t.Fatalf("expected CPU usage around 100 percent, got %f instead. Make sure that ScaleCpuLimitsToSandbox runtime option is set to true", usagePercentage)
	}
}
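Per the comment in the test above, the io.microsoft.container.processor.limit annotation treats 10000 as the full host CPU, so 10000 / runtime.NumCPU() is roughly one processor; with the ScaleCpuLimitsToSandbox runtime option enabled, that limit is rescaled to the sandbox UVM's processor count, which is why the fully loaded container should measure close to 100% of one core. A minimal sketch of the usage arithmetic applied to the two stats samples, with assumed example numbers:

// Minimal sketch of the CPU usage estimate in the test above: core-nanoseconds
// of usage divided by elapsed wall-clock nanoseconds, times 100.
package main

import "fmt"

func usagePercent(deltaUsageNs, deltaTimeNs uint64) float64 {
	return float64(deltaUsageNs) / float64(deltaTimeNs) * 100
}

func main() {
	// Assumed example numbers: ~5.2s of core time over a 5s window is ~104%,
	// which still passes the test's +/-10 percentage point tolerance.
	fmt.Printf("%.1f%%\n", usagePercent(5_200_000_000, 5_000_000_000))
}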
19 changes: 19 additions & 0 deletions test/testdata/scale_cpu_limits_to_sandbox/Dockerfile
@@ -0,0 +1,19 @@
# This Dockerfile builds a docker image based on golang:1.16.2-nanoserver-1809.
# The image is used in test/cri-containerd/scale_cpu_limits_to_sandbox_test.go.
# If any changes are made to this Dockerfile, make sure to update the tests
# accordingly.

# Base image
FROM golang:1.16.2-nanoserver-1809

# Get administrator privileges
USER ContainerAdministrator

# Put everything in the root directory
WORKDIR /

# Copy the source file
COPY main.go .

# Build binary
RUN go build -o load_cpu.exe main.go
43 changes: 43 additions & 0 deletions test/testdata/scale_cpu_limits_to_sandbox/main.go
@@ -0,0 +1,43 @@
package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"time"
)

const defaultDuration = 5

// This implementation is a simplified version of https://github.com/vikyd/go-cpu-load
func main() {
	cores := runtime.NumCPU()
	runtime.GOMAXPROCS(cores)

	loadDuration := defaultDuration
	// Check if duration has been passed explicitly
	if len(os.Args) > 1 {
		var err error
		loadDuration, err = strconv.Atoi(os.Args[1])
		if err != nil {
			_, _ = fmt.Fprintf(os.Stderr, "first argument must be integer: %s", err)
			os.Exit(1)
		}
	}

	for i := 0; i < cores; i++ {
		go func() {
			runtime.LockOSThread()
			defer runtime.UnlockOSThread()

			begin := time.Now()
			for {
				if time.Now().Sub(begin) > time.Duration(loadDuration)*time.Second {
					break
				}
			}
		}()
	}
	time.Sleep(time.Duration(loadDuration) * time.Second)
}
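load_cpu.exe pins one busy-spinning goroutine to each OS thread for loadDuration seconds and then exits; the trailing Sleep keeps main alive for the same duration. An equivalent sketch, not part of the commit, that waits on the workers with a sync.WaitGroup instead of sleeping, in case the wait logic ever needs to track the loop condition exactly:

// Variant sketch (not part of the commit): the same per-core busy loop, but
// main waits on a WaitGroup rather than sleeping for a fixed duration.
package main

import (
	"runtime"
	"sync"
	"time"
)

func burnCPU(d time.Duration) {
	var wg sync.WaitGroup
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Pin the goroutine to an OS thread so each core stays loaded.
			runtime.LockOSThread()
			defer runtime.UnlockOSThread()
			for begin := time.Now(); time.Since(begin) < d; {
				// spin
			}
		}()
	}
	wg.Wait()
}

func main() {
	burnCPU(5 * time.Second)
}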
