Standardize image lifecycle and listing benchmarks.
Bring the image-related benchmarks in line with the container and pod
benchmarks by parametrizing the benchmark settings and switching to
`gmeasure.experiment` for running the benchmarks.

Signed-off-by: Nashwan Azhari <nazhari@cloudbasesolutions.com>
aznashwan committed May 9, 2022
1 parent 6426389 commit c518694
Showing 2 changed files with 207 additions and 65 deletions.
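For context on the `gmeasure.experiment` switch mentioned in the commit message: below is a minimal, self-contained sketch of the sampling pattern the new code adopts. The suite bootstrap, names, and values are illustrative only and are not part of this commit.

// Minimal sketch of the gmeasure sampling pattern; illustrative only.
package benchmark_test

import (
	"testing"
	"time"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gmeasure"
)

func TestSampling(t *testing.T) {
	gomega.RegisterFailHandler(Fail)
	RunSpecs(t, "Sampling suite")
}

var _ = Describe("gmeasure sampling", func() {
	It("runs a parametrized experiment", func() {
		experiment := gmeasure.NewExperiment("ExampleLifecycle")

		// N samples total, at most NumParallel in flight at once; the
		// benchmarks in this commit clamp both to >= 1 before sampling.
		samplingConfig := gmeasure.SamplingConfig{N: 10, NumParallel: 2}

		experiment.Sample(func(idx int) {
			// A real sample would time one full image lifecycle here.
			time.Sleep(5 * time.Millisecond)
		}, samplingConfig)
	})
})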
257 changes: 192 additions & 65 deletions pkg/benchmark/image.go
@@ -17,109 +17,236 @@ limitations under the License.
package benchmark

import (
	"path"
	"runtime"
	"time"

	"github.com/golang/glog"
	"github.com/kubernetes-sigs/cri-tools/pkg/framework"
	internalapi "k8s.io/cri-api/pkg/apis"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega/gmeasure"
)

const (
	defaultImageBenchmarkTimeoutSeconds = 10
)

var defaultImageListingBenchmarkImagesAmd64 = []string{
	"busybox:1.26.2-glibc",
	"busybox:1-uclibc",
	"busybox:1",
	"busybox:1-glibc",
	"busybox:1-musl",
}
var defaultImageListingBenchmarkImages = []string{
	"busybox:1",
	"busybox:1-glibc",
	"busybox:1-musl",
}

var _ = framework.KubeDescribe("Image", func() {
	f := framework.NewDefaultCRIFramework()

	var ic internalapi.ImageManagerService
	var testImageList []string = framework.TestContext.BenchmarkingParams.ImageListingBenchmarkImages
	if len(testImageList) == 0 {
		if runtime.GOARCH == "amd64" {
			testImageList = defaultImageListingBenchmarkImagesAmd64
		} else {
			testImageList = defaultImageListingBenchmarkImages
		}
	}

	BeforeEach(func() {
		ic = f.CRIClient.CRIImageClient
	})

	AfterEach(func() {
		for _, imageName := range testImageList {
			imageSpec := &runtimeapi.ImageSpec{
				Image: imageName,
			}
			ic.RemoveImage(imageSpec)
		}
	})

Context("benchmark about operations on Image", func() {
var err error
It("benchmark about basic operations on Image", func() {
var err error

var testImageList []string
if runtime.GOARCH == "amd64" {
testImageList = []string{
"busybox:1.26.2-glibc",
"busybox:1-uclibc",
"busybox:1",
"busybox:1-glibc",
"busybox:1-musl",
imageBenchmarkTimeoutSeconds := defaultImageBenchmarkTimeoutSeconds
if framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds > 0 {
imageBenchmarkTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds
}
} else {
testImageList = []string{
"busybox:1",
"busybox:1-glibc",
"busybox:1-musl",
}
}

AfterEach(func() {
for _, imageName := range testImageList {
imageSpec := &runtimeapi.ImageSpec{
Image: imageName,
}
ic.RemoveImage(imageSpec)
imagePullingBenchmarkImage := framework.TestContext.BenchmarkingParams.ImagePullingBenchmarkImage
// NOTE(aznashwan): default to using first test image from listing benchmark images:
if imagePullingBenchmarkImage == "" {
imagePullingBenchmarkImage = testImageList[0]
glog.Infof("Defaulting to using following image: %s", imagePullingBenchmarkImage)
}
})

imagePullTimeoutSeconds := defaultImagePullTimeoutSeconds
imageStatusTimeoutSeconds := defaultImageStatusTimeoutSeconds
imageRemoveTimeoutSeconds := defaultImageRemoveTimeoutSeconds
imageListTimeoutSeconds := defaultImageListTimeoutSeconds
if framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds > 0 {
imagePullTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds
imageStatusTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds
imageRemoveTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds
imageListTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds
}
// Setup shared sampling config from TestContext:
samplingConfig := gmeasure.SamplingConfig{
N: framework.TestContext.BenchmarkingParams.ImagesNumber,
NumParallel: framework.TestContext.BenchmarkingParams.ImagesNumberParallel,
}
if samplingConfig.N < 1 {
samplingConfig.N = 1
}
if samplingConfig.NumParallel < 1 {
samplingConfig.NumParallel = 1
}

Measure("benchmark about basic operations on Image", func(b Benchmarker) {
imageSpec := &runtimeapi.ImageSpec{
Image: testImageList[0],
// Setup image lifecycle results reporting channel:
lifecycleResultsSet := LifecycleBenchmarksResultsSet{
OperationsNames: []string{"PullImage", "StatusImage", "RemoveImage"},
NumParallel: samplingConfig.NumParallel,
Datapoints: make([]LifecycleBenchmarkDatapoint, 0),
}
lifecycleResultsManager := NewLifecycleBenchmarksResultsManager(
lifecycleResultsSet,
imageBenchmarkTimeoutSeconds,
)
lifecycleResultsChannel := lifecycleResultsManager.StartResultsConsumer()

// Image lifecycle benchmark experiment:
experiment := gmeasure.NewExperiment("ImageLifecycle")
experiment.Sample(func(idx int) {
var err error
var lastStartTime, lastEndTime int64
durations := make([]int64, len(lifecycleResultsSet.OperationsNames))

operation := b.Time("pull Image", func() {
framework.PullPublicImage(ic, testImageList[0], nil)
})
Expect(operation.Minutes()).Should(BeNumerically("<", imagePullTimeoutSeconds), "pull Image shouldn't take too long.")
imageSpec := &runtimeapi.ImageSpec{
Image: imagePullingBenchmarkImage,
}

operation = b.Time("Image status", func() {
_, err = ic.ImageStatus(imageSpec, false)
})
By("Pull Image")
startTime := time.Now().UnixNano()
lastStartTime = startTime
imageId := framework.PullPublicImage(ic, imagePullingBenchmarkImage, nil)
lastEndTime = time.Now().UnixNano()
durations[0] = lastEndTime - lastStartTime

framework.ExpectNoError(err, "failed to get image status: %v", err)
Expect(operation.Seconds()).Should(BeNumerically("<", imageStatusTimeoutSeconds), "get image status shouldn't take too long.")
By("Status Image")
lastStartTime = startTime
_, err = ic.ImageStatus(imageSpec, false)
lastEndTime = time.Now().UnixNano()
durations[1] = lastEndTime - lastStartTime
framework.ExpectNoError(err, "failed to status Image: %v", err)

operation = b.Time("remove Image", func() {
By("Remove Image")
lastStartTime = startTime
err = ic.RemoveImage(imageSpec)
})
lastEndTime = time.Now().UnixNano()
durations[2] = lastEndTime - lastStartTime
framework.ExpectNoError(err, "failed to remove Image: %v", err)

res := LifecycleBenchmarkDatapoint{
SampleIndex: idx,
StartTime: startTime,
EndTime: lastEndTime,
OperationsDurationsNs: durations,
MetaInfo: map[string]string{"imageId": imageId},
}
lifecycleResultsChannel <- &res

}, samplingConfig)

framework.ExpectNoError(err, "failed to remove image: %v", err)
Expect(operation.Seconds()).Should(BeNumerically("<", imageRemoveTimeoutSeconds), "remove Image shouldn't take too long.")
// Send nil and give the manager a minute to process any already-queued results:
lifecycleResultsChannel <- nil
err = lifecycleResultsManager.AwaitAllResults(60)
if err != nil {
glog.Errorf("Results manager failed to await all results: %s", err)
}

if framework.TestContext.BenchmarkingOutputDir != "" {
filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "image_lifecycle_benchmark_data.json")
err = lifecycleResultsManager.WriteResultsFile(filepath)
if err != nil {
glog.Errorf("Error occurred while writing benchmark results to file %s: %s", filepath, err)
}
} else {
glog.Infof("No benchmarking out dir provided, skipping writing benchmarking results.")
glog.Infof("Image lifecycle results were: %+v", lifecycleResultsManager.resultsSet)
}
})

		It("benchmark about listing Image", func() {
			var err error

			for _, imageName := range testImageList {
				framework.PullPublicImage(ic, imageName, nil)
			}

			imageBenchmarkTimeoutSeconds := defaultImageBenchmarkTimeoutSeconds
			if framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds > 0 {
				imageBenchmarkTimeoutSeconds = framework.TestContext.BenchmarkingParams.ImageBenchmarkTimeoutSeconds
			}

			// Setup shared sampling config from TestContext:
			samplingConfig := gmeasure.SamplingConfig{
				N:           framework.TestContext.BenchmarkingParams.ImagesNumber,
				NumParallel: framework.TestContext.BenchmarkingParams.ImagesNumberParallel,
			}
			if samplingConfig.N < 1 {
				samplingConfig.N = 1
			}
			if samplingConfig.NumParallel < 1 {
				samplingConfig.NumParallel = 1
			}

			// Setup image listing results reporting channel:
			imageListResultsSet := LifecycleBenchmarksResultsSet{
				OperationsNames: []string{"ListImages"},
				NumParallel:     samplingConfig.NumParallel,
				Datapoints:      make([]LifecycleBenchmarkDatapoint, 0),
			}
			imageListResultsManager := NewLifecycleBenchmarksResultsManager(
				imageListResultsSet,
				imageBenchmarkTimeoutSeconds,
			)
			imagesResultsChannel := imageListResultsManager.StartResultsConsumer()

			// Image listing benchmark experiment:
			experiment := gmeasure.NewExperiment("ImageListing")
			experiment.Sample(func(idx int) {
				var err error
				durations := make([]int64, len(imageListResultsSet.OperationsNames))

				By("List Images")
				startTime := time.Now().UnixNano()
				_, err = ic.ListImages(nil)
				endTime := time.Now().UnixNano()
				durations[0] = endTime - startTime
				framework.ExpectNoError(err, "failed to List images: %v", err)

				res := LifecycleBenchmarkDatapoint{
					SampleIndex:           idx,
					StartTime:             startTime,
					EndTime:               endTime,
					OperationsDurationsNs: durations,
					MetaInfo:              nil,
				}
				imagesResultsChannel <- &res
			}, samplingConfig)

			// Send nil and give the manager a minute to process any already-queued results:
			imagesResultsChannel <- nil
			err = imageListResultsManager.AwaitAllResults(60)
			if err != nil {
				glog.Errorf("Results manager failed to await all results: %s", err)
			}

			if framework.TestContext.BenchmarkingOutputDir != "" {
				filepath := path.Join(framework.TestContext.BenchmarkingOutputDir, "image_listing_benchmark_data.json")
				err = imageListResultsManager.WriteResultsFile(filepath)
				if err != nil {
					glog.Errorf("Error occurred while writing benchmark results to file %s: %s", filepath, err)
				}
			} else {
				glog.Infof("No benchmarking output dir provided, skipping writing benchmarking results.")
				glog.Infof("Image listing results were: %+v", imageListResultsManager.resultsSet)
			}
		})
	})
})
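Both benchmarks above rely on the results manager's channel contract: each sample sends one datapoint, a nil send terminates the stream, and AwaitAllResults blocks until the consumer finishes or times out. The manager itself is not part of this diff; the following is a simplified, self-contained sketch of that contract using hypothetical stand-in types, not the actual pkg/benchmark implementation.

// Hedged sketch of the results-consumer pattern; types are stand-ins.
package main

import (
	"fmt"
	"time"
)

type datapoint struct {
	SampleIndex           int
	OperationsDurationsNs []int64
}

type resultsManager struct {
	results chan *datapoint
	done    chan struct{}
	stored  []*datapoint
}

func newResultsManager() *resultsManager {
	return &resultsManager{results: make(chan *datapoint), done: make(chan struct{})}
}

// StartResultsConsumer launches the consumer goroutine and returns the
// channel producers send on; a nil send marks the end of the stream.
func (m *resultsManager) StartResultsConsumer() chan *datapoint {
	go func() {
		for res := range m.results {
			if res == nil {
				break
			}
			m.stored = append(m.stored, res)
		}
		close(m.done)
	}()
	return m.results
}

// AwaitAllResults blocks until the nil terminator has been consumed or
// the timeout elapses, mirroring the AwaitAllResults(60) calls above.
func (m *resultsManager) AwaitAllResults(timeoutSeconds int) error {
	select {
	case <-m.done:
		return nil
	case <-time.After(time.Duration(timeoutSeconds) * time.Second):
		return fmt.Errorf("timed out waiting for results")
	}
}

func main() {
	m := newResultsManager()
	ch := m.StartResultsConsumer()
	for i := 0; i < 3; i++ {
		ch <- &datapoint{SampleIndex: i, OperationsDurationsNs: []int64{1000}}
	}
	ch <- nil // terminate the stream
	if err := m.AwaitAllResults(60); err != nil {
		fmt.Println(err)
	}
	fmt.Printf("collected %d datapoints\n", len(m.stored))
}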
15 changes: 15 additions & 0 deletions pkg/framework/test_context.go
@@ -63,10 +63,25 @@ type BenchmarkingParamsType struct {
	// for a Pod lifecycle benchmark to take.
	PodBenchmarkTimeoutSeconds int `yaml:"podBenchmarkTimeoutSeconds"`

	// ImagesNumber is the number of images to run tests on in image-related benchmarks.
	ImagesNumber int `yaml:"imagesNumber"`

	// ImagesNumberParallel is the maximum number of image-related benchmarks
	// to run in parallel.
	ImagesNumberParallel int `yaml:"imagesNumberParallel"`

	// ImageBenchmarkTimeoutSeconds is the maximum number of seconds acceptable
	// for image-related benchmarks.
	ImageBenchmarkTimeoutSeconds int `yaml:"imageBenchmarkTimeoutSeconds"`

	// ImagePullingBenchmarkImage is the string ref to the image to be used in
	// image pulling benchmarks. Internally defaults to BusyBox.
	ImagePullingBenchmarkImage string `yaml:"imagePullingBenchmarkImage"`

	// ImageListingBenchmarkImages is a list of string image refs to query
	// during image listing benchmarks.
	ImageListingBenchmarkImages []string `yaml:"imageListingBenchmarkImages"`

	// PodContainerStartBenchmarkTimeoutSeconds is the maximum number of seconds
	// acceptable for benchmarks focused on Pod+Container start performance.
	PodContainerStartBenchmarkTimeoutSeconds int `yaml:"podContainerStartBenchmarkTimeoutSeconds"`
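A benchmarking-params file exercising the new fields might look like the following. This is a hypothetical snippet inferred solely from the yaml tags above; the enclosing file layout consumed by TestContext is not shown in this diff, and the values are illustrative.

# Hypothetical benchmarking-params snippet (field names from yaml tags above):
imagesNumber: 100
imagesNumberParallel: 2
imageBenchmarkTimeoutSeconds: 10
imagePullingBenchmarkImage: "busybox:1"
imageListingBenchmarkImages:
  - "busybox:1-glibc"
  - "busybox:1-musl"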
