Embed ocp config files (#214)
* Embed config files

Signed-off-by: Raul Sevilla <rsevilla@redhat.com>

* Update docs

Signed-off-by: Raul Sevilla <rsevilla@redhat.com>
rsevilla87 committed Nov 28, 2022
1 parent 1499276 commit ce1b00e
Showing 28 changed files with 89 additions and 23 deletions.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
8 changes: 7 additions & 1 deletion cmd/kube-burner/ocp.go
@@ -15,15 +15,21 @@
package main

import (
"embed"
"fmt"
"log"
"strings"

_ "embed"

"github.com/cloud-bulldozer/kube-burner/pkg/workloads"
uid "github.com/satori/go.uuid"
"github.com/spf13/cobra"
)

//go:embed ocp-config/*
var OCPConfig embed.FS

func openShiftCmd() *cobra.Command {
ocpCmd := &cobra.Command{
Use: "ocp",
@@ -48,7 +54,7 @@ func openShiftCmd() *cobra.Command {
"BURST": fmt.Sprintf("%d", *burst),
"INDEXING": fmt.Sprintf("%v", indexing),
}
wh = workloads.NewWorkloadHelper(envVars, *alerting)
wh = workloads.NewWorkloadHelper(envVars, *alerting, OCPConfig)
wh.Metadata.UUID = *uuid
if *esServer != "" {
err := wh.GatherMetadata()
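The `//go:embed ocp-config/*` directive above is what bundles the workload configuration files into the binary at build time; the resulting `embed.FS` is then passed to the workload helper. A minimal, self-contained sketch of the same pattern (the `configs/` directory name is illustrative, not part of this commit):

```go
package main

import (
	"embed"
	"fmt"
	"io/fs"
	"log"
)

// Embed every file under configs/ into the binary at build time.
// This commit embeds ocp-config/* the same way.
//go:embed configs/*
var configs embed.FS

func main() {
	// Walk the embedded tree and list the files it contains.
	err := fs.WalkDir(configs, ".", func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() {
			fmt.Println("embedded:", p)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```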
20 changes: 15 additions & 5 deletions docs/ocp.md
@@ -4,7 +4,7 @@ The kube-burner binary brings a very opinionated OpenShift wrapper designed to s
This wrapper is hosted under the `kube-burner ocp` subcommand that currently looks like:

```console
$ ./bin/amd64/kube-burner ocp
$ kube-burner ocp
This subcommand is meant to be used against OpenShift clusters and serve as a shortcut to trigger well-known workloads

Usage:
@@ -32,20 +32,30 @@ Use "kube-burner ocp [command] --help" for more information about a command.

## Usage

In order to trigger one of the supported workloads using this subcommand you have to run kube-burner within the directory of the desired workload. The workloads are stored in the ocp-config directory of this repository. i.e:
In order to trigger one of the supported workloads using this subcommand, run kube-burner with the `ocp` subcommand. The workloads are embedded in the kube-burner binary:

Running node-density with 100 pods per node

```console
~/kube-burner $ cd ocp-config/node-density
~/kube-burner/ocp-config/node-density $ kube-burner ocp node-density --pods-per-node=100
$ kube-burner ocp node-density --pods-per-node=100
$
```

With the command above, the wrapper will calculate the required number of pods to deploy across all worker nodes of the cluster.
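As a rough illustration of that sizing step (the values and the exact formula are assumptions, not the wrapper's actual code), the total pod count follows from the worker-node count and the `--pods-per-node` flag:

```go
package main

import "fmt"

func main() {
	// Illustrative sizing only; the real wrapper discovers the worker-node
	// count from the cluster and may also account for pods already running.
	workerNodes := 24  // hypothetical cluster size
	podsPerNode := 100 // value of --pods-per-node
	fmt.Println("total pods to deploy:", workerNodes*podsPerNode)
}
```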

This wrapper provides the following benefits:
This wrapper provides the following benefits, among others:

- Provides a simplified execution of the supported workloads
- Indexes OpenShift metadata along with the benchmark result. This document can be found with the following query: `uuid: <benchmark-uuid> AND metricName.keyword: "clusterMetadata"`
- Prevents modifying configuration files to tweak some of the parameters of the workloads
- Discovers the Prometheus URL and authentication token, so the user does not have to provide them manually.

It's also possible to customize the workload configuration by extracting it, modifying it, and then running it:

```console
$ kube-burner ocp node-density --extract
$ ls
alerts.yml metrics.yml node-density.yml pod.yml
$ vi node-density.yml # Perform modifications accordingly
$ kube-burner ocp node-density --pods-per-node=100 # Run workload
```
1 change: 1 addition & 0 deletions ocp-config
13 changes: 11 additions & 2 deletions pkg/workloads/cluster-density.go
@@ -19,18 +19,26 @@ import (
"os"
"time"

"github.com/cloud-bulldozer/kube-burner/log"

"github.com/spf13/cobra"
)

// NewClusterDensity holds cluster-density workload
func NewClusterDensity(wh *WorkloadHelper) *cobra.Command {
var iterations, churnPercent int
var churn bool
var churn, extract bool
var churnDelay, churnDuration time.Duration
cmd := &cobra.Command{
Use: "cluster-density",
Short: "Runs cluster-density workload",
PreRun: func(cmd *cobra.Command, args []string) {
if extract {
if err := wh.extractWorkload(cmd.Name()); err != nil {
log.Fatal(err)
}
os.Exit(0)
}
wh.Metadata.Benchmark = cmd.Name()
os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
os.Setenv("CHURN", fmt.Sprint(churn))
@@ -39,14 +47,15 @@ func NewClusterDensity(wh *WorkloadHelper) *cobra.Command {
os.Setenv("CHURN_PERCENT", fmt.Sprint(churnPercent))
},
Run: func(cmd *cobra.Command, args []string) {
wh.run("cluster-density.yml")
wh.run(cmd.Name())
},
}
cmd.Flags().IntVar(&iterations, "iterations", 0, "Cluster-density iterations")
cmd.Flags().BoolVar(&churn, "churn", true, "Enable churning")
cmd.Flags().DurationVar(&churnDuration, "churn-duration", 1*time.Hour, "Churn duration")
cmd.Flags().DurationVar(&churnDelay, "churn-delay", 30*time.Second, "Time to wait between each churn")
cmd.Flags().IntVar(&churnPercent, "churn-percent", 10, "Percentage of job iterations that kube-burner will churn each round")
cmd.Flags().BoolVar(&extract, "extract", false, "Extract workload in the current directory")
cmd.MarkFlagRequired("iterations")
return cmd
}
39 changes: 34 additions & 5 deletions pkg/workloads/helpers.go
@@ -16,11 +16,13 @@ package workloads

import (
"bytes"
"embed"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path"
"time"

"github.com/cloud-bulldozer/kube-burner/log"
@@ -35,6 +37,7 @@ const (
metricsProfile = "metrics.yml"
alertsProfile = "alerts.yml"
metadataMetricName = "clusterMetadata"
ocpCfgDir = "ocp-config"
)

type WorkloadHelper struct {
@@ -43,6 +46,7 @@ type WorkloadHelper struct {
prometheusToken string
Metadata clusterMetadata
alerting bool
ocpConfig embed.FS
}

type clusterMetadata struct {
@@ -66,10 +70,11 @@ }
}

// NewWorkloadHelper initializes workloadHelper
func NewWorkloadHelper(envVars map[string]string, alerting bool) WorkloadHelper {
func NewWorkloadHelper(envVars map[string]string, alerting bool, ocpConfig embed.FS) WorkloadHelper {
return WorkloadHelper{
envVars: envVars,
alerting: alerting,
envVars: envVars,
alerting: alerting,
ocpConfig: ocpConfig,
}
}

@@ -139,10 +144,17 @@ func (wh *WorkloadHelper) IndexMetadata() {
}
}

func (wh *WorkloadHelper) run(configFile string) {
func (wh *WorkloadHelper) run(workload string) {
var rc int
var alertM *alerting.AlertManager
configSpec, err := config.Parse(configFile, true)
cfg := fmt.Sprintf("%s.yml", workload)
if _, err := os.Stat(cfg); err != nil {
log.Debug("Workload not available in the current directory, extracting it")
if err := wh.extractWorkload(workload); err != nil {
log.Fatalf("Error extracting workload: %v", err)
}
}
configSpec, err := config.Parse(cfg, true)
if err != nil {
log.Fatal(err)
}
@@ -165,3 +177,20 @@
wh.IndexMetadata()
os.Exit(rc)
}

func (wh *WorkloadHelper) extractWorkload(workload string) error {
dirContent, err := wh.ocpConfig.ReadDir(path.Join(ocpCfgDir, workload))
if err != nil {
return err
}
for _, f := range dirContent {
fileContent, _ := wh.ocpConfig.ReadFile(path.Join(ocpCfgDir, workload, f.Name()))
fd, err := os.Create(f.Name())
if err != nil {
return err
}
fd.Write(fileContent)
fd.Close()
}
return nil
}
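The `extractWorkload` helper above reads the embedded workload directory and writes each file into the current working directory; note that it drops the `ReadFile` and `Write` errors. A standalone sketch of the same extraction with explicit error handling (package layout and names are illustrative, not the project's code):

```go
package main

import (
	"embed"
	"log"
	"os"
	"path"
)

// Illustrative stand-in for the embedded ocp-config tree.
//go:embed configs/*
var configs embed.FS

// extractDir copies every file under the embedded directory into
// the current working directory, surfacing any error it hits.
func extractDir(dir string) error {
	entries, err := configs.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		data, err := configs.ReadFile(path.Join(dir, e.Name()))
		if err != nil {
			return err
		}
		if err := os.WriteFile(e.Name(), data, 0o644); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := extractDir("configs/node-density"); err != nil {
		log.Fatal(err)
	}
}
```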
13 changes: 11 additions & 2 deletions pkg/workloads/node-density-heavy.go
@@ -16,10 +16,11 @@ package workloads

import (
"fmt"
"log"
"os"
"time"

"github.com/cloud-bulldozer/kube-burner/log"

"github.com/cloud-bulldozer/kube-burner/pkg/discovery"
"github.com/spf13/cobra"
)
@@ -28,11 +29,18 @@ import (
func NewNodeDensityHeavy(wh *WorkloadHelper) *cobra.Command {
var podsPerNode, probesPeriod int
var podReadyThreshold time.Duration
var extract bool
cmd := &cobra.Command{
Use: "node-density-heavy",
Short: "Runs node-density-heavy workload",
SilenceUsage: true,
PreRun: func(cmd *cobra.Command, args []string) {
if extract {
if err := wh.extractWorkload(cmd.Name()); err != nil {
log.Fatal(err)
}
os.Exit(0)
}
wh.Metadata.Benchmark = cmd.Name()
workerNodeCount, err := discovery.GetWorkerNodeCount()
if err != nil {
@@ -50,11 +58,12 @@ func NewNodeDensityHeavy(wh *WorkloadHelper) *cobra.Command {
os.Setenv("PROBES_PERIOD", fmt.Sprint(probesPeriod))
},
Run: func(cmd *cobra.Command, args []string) {
wh.run("node-density-heavy.yml")
wh.run(cmd.Name())
},
}
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 1*time.Hour, "Pod ready timeout threshold")
cmd.Flags().IntVar(&probesPeriod, "probes-period", 10, "Perf app readiness/liveness probes period in seconds")
cmd.Flags().IntVar(&podsPerNode, "pods-per-node", 245, "Pods per node")
cmd.Flags().BoolVar(&extract, "extract", false, "Extract workload in the current directory")
return cmd
}
10 changes: 9 additions & 1 deletion pkg/workloads/node-density.go
@@ -30,11 +30,18 @@ func NewNodeDensity(wh *WorkloadHelper) *cobra.Command {
var podsPerNode int
var podReadyThreshold time.Duration
var containerImage string
var extract bool
cmd := &cobra.Command{
Use: "node-density",
Short: "Runs node-density workload",
SilenceUsage: true,
PreRun: func(cmd *cobra.Command, args []string) {
if extract {
if err := wh.extractWorkload(cmd.Name()); err != nil {
log.Fatal(err)
}
os.Exit(0)
}
wh.Metadata.Benchmark = cmd.Name()
workerNodeCount, err := discovery.GetWorkerNodeCount()
if err != nil {
@@ -51,11 +58,12 @@ func NewNodeDensity(wh *WorkloadHelper) *cobra.Command {
os.Setenv("CONTAINER_IMAGE", containerImage)
},
Run: func(cmd *cobra.Command, args []string) {
wh.run("node-density.yml")
wh.run(cmd.Name())
},
}
cmd.Flags().IntVar(&podsPerNode, "pods-per-node", 245, "Pods per node")
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 5*time.Second, "Pod ready timeout threshold")
cmd.Flags().StringVar(&containerImage, "container-image", "gcr.io/google_containers/pause:3.1", "Container image")
cmd.Flags().BoolVar(&extract, "extract", false, "Extract workload in the current directory")
return cmd
}
8 changes: 1 addition & 7 deletions test/run-ocp.sh
@@ -12,17 +12,11 @@ ES_INDEX="kube-burner-ocp"
COMMON_FLAGS="--es-server=${ES_SERVER} --es-index=${ES_INDEX} --alerting=false --uuid=${UUID}"

echo "Running node-density wrapper"
pushd ../ocp-config/node-density
kube-burner ocp node-density --pods-per-node=75 --pod-ready-threshold=10s --container-image=gcr.io/google_containers/pause:3.0 ${COMMON_FLAGS}
oc delete ns -l kube-burner-uuid=${UUID}
popd
echo "Running node-density-heavy wrapper"
pushd ../ocp-config/node-density-heavy
kube-burner ocp node-density-heavy --pods-per-node=75 ${COMMON_FLAGS} --qps=5 --burst=5
oc delete ns -l kube-burner-uuid=${UUID}
popd
echo "Running cluster-density wrapper"
pushd ../ocp-config/cluster-density
kube-burner ocp cluster-density --iterations=3 --churn-duration=5m ${COMMON_FLAGS}
oc delete ns -l kube-burner-uuid=${UUID}
popd
oc delete ns -l kube-burner-uuid=${UUID}
