Local indexer (#259)
* Move commons to util/metrics

Signed-off-by: Raul Sevilla <rsevilla@redhat.com>

* Move writeToFile logic to local indexer

Signed-off-by: Raul Sevilla <rsevilla@redhat.com>

* Update tests parameters

Signed-off-by: Raul Sevilla <rsevilla@redhat.com>

---------

Signed-off-by: Raul Sevilla <rsevilla@redhat.com>
rsevilla87 committed Mar 1, 2023
1 parent 92a429b commit feec442
Showing 39 changed files with 210 additions and 255 deletions.
25 changes: 17 additions & 8 deletions cmd/kube-burner/kube-burner.go
@@ -24,8 +24,8 @@ import (
"github.com/cloud-bulldozer/kube-burner/log"
"github.com/cloud-bulldozer/kube-burner/pkg/alerting"
"github.com/cloud-bulldozer/kube-burner/pkg/burner"
"github.com/cloud-bulldozer/kube-burner/pkg/commons"
"github.com/cloud-bulldozer/kube-burner/pkg/config"
"github.com/cloud-bulldozer/kube-burner/pkg/util/metrics"
"github.com/cloud-bulldozer/kube-burner/pkg/version"

"github.com/cloud-bulldozer/kube-burner/pkg/indexers"
@@ -102,8 +102,12 @@ func initCmd() *cobra.Command {
// We assume configFile is config.yml
configFile = "config.yml"
}
metricsScraper := commons.ProcessMetricsScraperConfig(commons.MetricsScraperConfig{
ConfigFile: configFile,
configSpec, err := config.Parse(configFile, false)
if err != nil {
log.Fatal(err.Error())
}
metricsScraper := metrics.ProcessMetricsScraperConfig(metrics.ScraperConfig{
ConfigSpec: configSpec,
Password: password,
PrometheusStep: prometheusStep,
MetricsEndpoint: metricsEndpoint,
@@ -116,7 +120,7 @@
UUID: uuid,
UserMetaData: userMetadata,
})
rc, err = burner.Run(metricsScraper.ConfigSpec, uuid, metricsScraper.PrometheusClients, metricsScraper.AlertMs, metricsScraper.Indexer, timeout, metricsScraper.UserMetadataContent)
rc, err = burner.Run(configSpec, uuid, metricsScraper.PrometheusClients, metricsScraper.AlertMs, metricsScraper.Indexer, timeout, metricsScraper.UserMetadataContent)
if err != nil {
log.Fatalf(err.Error())
}
@@ -187,8 +191,12 @@ func indexCmd() *cobra.Command {
log.Info("👋 Exiting kube-burner ", uuid)
},
Run: func(cmd *cobra.Command, args []string) {
_ = commons.ProcessMetricsScraperConfig(commons.MetricsScraperConfig{
ConfigFile: configFile,
configSpec, err := config.Parse(configFile, false)
if err != nil {
log.Fatal(err.Error())
}
_ = metrics.ProcessMetricsScraperConfig(metrics.ScraperConfig{
ConfigSpec: configSpec,
Password: password,
PrometheusStep: prometheusStep,
MetricsEndpoint: metricsEndpoint,
@@ -239,15 +247,16 @@ func importCmd() *cobra.Command {
if err != nil {
log.Fatal(err.Error())
}
err = prometheus.ImportTarball(tarball, configSpec.GlobalConfig.IndexerConfig.DefaultIndex, indexer)
err = metrics.ImportTarball(tarball, indexer)
if err != nil {
log.Fatal(err.Error())
}
},
}
cmd.Flags().StringVarP(&configFile, "config", "c", "", "Config file path or URL")
cmd.Flags().StringVar(&tarball, "tarball", "", "Metrics tarball file")
cmd.MarkFlagsRequiredTogether("config", "tarball")
cmd.MarkFlagRequired("config")
cmd.MarkFlagRequired("tarball")
return cmd
}

@@ -1,6 +1,5 @@
---
global:
writeToFile: false
gc: {{.GC}}
indexerConfig:
enabled: {{.INDEXING}}
@@ -10,7 +9,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: {{.ES_INDEX}}
jobs:
- name: cluster-density-v2
namespace: cluster-density-v2
@@ -1,6 +1,5 @@
---
global:
writeToFile: false
gc: {{.GC}}
indexerConfig:
enabled: {{.INDEXING}}
@@ -10,7 +9,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: {{.ES_INDEX}}
jobs:
- name: cluster-density
namespace: cluster-density
@@ -1,6 +1,5 @@
---
global:
writeToFile: false
gc: {{.GC}}
indexerConfig:
enabled: {{.INDEXING}}
@@ -10,7 +9,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: {{.ES_INDEX}}
jobs:
- name: node-density-cni
namespace: node-density-cni
@@ -1,6 +1,5 @@
---
global:
writeToFile: false
gc: {{.GC}}
indexerConfig:
enabled: {{.INDEXING}}
@@ -10,7 +9,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: {{.ES_INDEX}}
thresholds:
- conditionType: Ready
metric: P99
2 changes: 0 additions & 2 deletions cmd/kube-burner/ocp-config/node-density/node-density.yml
@@ -1,6 +1,5 @@
---
global:
writeToFile: false
gc: {{.GC}}
indexerConfig:
enabled: {{.INDEXING}}
@@ -10,7 +9,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: {{.ES_INDEX}}
thresholds:
- conditionType: Ready
metric: P99
3 changes: 0 additions & 3 deletions docs/configuration.md
@@ -18,9 +18,6 @@ In this section is described global job configuration, it holds the following pa

| Option | Description | Type | Example | Default |
|------------------|----------------------------------------------------------------------------------------------------------|----------------|----------------|-------------|
| writeToFile | Whether to dump collected metrics to files | Boolean | true | false |
| createTarball | Create metrics tarball, it has no effect if `writeToFile` is not enabled | Boolean | true | false |
| metricsDirectory | Directory where collected metrics will be dumped into. It will be created if it doesn't exist previously | String | ./metrics | ./collected-metrics |
| measurements | List of measurements. Detailed in the [measurements section] | List | - | [] |
| indexerConfig | Holds the indexer configuration. Detailed in the [indexers section] | Object | - | - |
| requestTimeout | Client-go request timeout | Duration | 5s | 15s |
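
With this change, metrics output is configured entirely through `indexerConfig` rather than the removed `writeToFile`/`createTarball`/`metricsDirectory` options. A minimal sketch of the resulting global section, with purely illustrative values, could look like:

```yaml
global:
  gc: true
  requestTimeout: 15s
  measurements:
    - name: podLatency
  indexerConfig:
    enabled: true
    type: elastic
    # illustrative endpoint only
    esServers: [https://es.example.com:9200]
```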
18 changes: 15 additions & 3 deletions docs/indexers.md
@@ -15,11 +15,9 @@ The indexer configuration is described in the `indexerConfig` section and can be

Index documents in Elasticsearch 7 instances.

In addition, each indexer has its own configuration parameters.

----

The `elastic` indexer is configured by the parameters below:
The `elastic` indexer can be configured by the parameters below:

| Option | Description | Type | Example | Default |
|----------------------|---------------------------------------------------|-------------|------------------------------------------|---------|
@@ -28,3 +26,17 @@ The `elastic` indexer is configured by the parameters below:
| insecureSkipVerify | TLS certificate verification | Boolean | true | false |

**Note**: It's possible to index documents in an authenticated ES instance using the notation `http(s)://[username]:[password]@[address]:[port]` in the *esServers* parameter.
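
As a rough illustration of that notation, an `indexerConfig` block pointing the `elastic` indexer at an authenticated instance could look like the sketch below; the endpoint, credentials and index name are placeholders rather than values from this repository:

```yaml
indexerConfig:
  enabled: true
  type: elastic
  # placeholder credentials following http(s)://[username]:[password]@[address]:[port]
  esServers: [https://elastic:changeme@es.example.com:9200]
  defaultIndex: kube-burner
  insecureSkipVerify: true
```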

### Local

Writes collected metrics to local files.

----

The `local` indexer can be configured by the parameters below:

| Option | Description | Type | Example | Default |
|----------------------|---------------------------------------------------|-------------|------------------------------------------|---------|
| metricsDirectory | Directory where collected metrics will be dumped. It will be created if it doesn't already exist | String | /var/tmp/myMetrics | collected-metrics |
| createTarball | Create metrics tarball | Boolean | true | false |
| tarballName | Name of the metrics tarball | String | myMetrics.tgz | kube-burner-metrics.tgz |
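
Putting those parameters together, a minimal configuration for this indexer might look like the sketch below; the `type: local` selector is inferred from this change rather than quoted from the docs:

```yaml
indexerConfig:
  enabled: true
  # assumed selector for the new local indexer
  type: local
  metricsDirectory: collected-metrics
  createTarball: true
  tarballName: kube-burner-metrics.tgz
```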
8 changes: 2 additions & 6 deletions docs/measurements.md
@@ -5,7 +5,7 @@ Apart from prometheus metrics collection, Kube-burner allows to get further metr
Measurements are enabled in the measurements section of the configuration file. This section contains a list of measurements with their options.
'kube-burner' supports the following measurements so far:

Note: podLatency is only captured when a benchmark is triggered. It does not work with the "index" mode of kube-burner
**Note:** The podLatency measurement is only captured when a benchmark is triggered. It does not work with the "index" mode of kube-burner.

## Pod latency

@@ -14,10 +14,9 @@ Collects latencies from the different pod startup phases, these **latency metric
```yaml
measurements:
- name: podLatency
esIndex: kube-burner-podlatency
```

This measurement sends its metrics to the index configured by *esIndex*. The metrics collected are pod latency histograms and pod latency quantiles P99, P95 and P50.
This measurement sends its metrics to the configured indexer. The metrics collected are pod latency histograms and pod latency quantiles P99, P95, and P50.

Pod latency sample:

@@ -86,8 +85,6 @@ And the metrics are:

More information about the pod conditions can be found in the [kubernetes docs site](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions).

**Note**: The __esIndex__ option can be used to configure the ES index where metrics will be indexed.

### Pod latency thresholds

It's possible to establish pod latency thresholds on the different pod conditions and metrics through the `thresholds` option of the podLatency measurement:
@@ -97,7 +94,6 @@ For example, the example below establish a threshold of 2000ms in the P99 metric
```yaml
measurements:
- name: podLatency
esIndex: kube-burner-podlatency
thresholds:
- conditionType: Ready
metric: P99
3 changes: 0 additions & 3 deletions examples/workloads/api-intensive/api-intensive.yml
@@ -1,7 +1,5 @@
---
global:
writeToFile: true
metricsDirectory: collected-metrics
indexerConfig:
enabled: true
esServers: [http://elastic-elk.apps.rsevilla.kube-burner.com]
@@ -10,7 +8,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: kube-burner

jobs:
- name: api-intensive
4 changes: 0 additions & 4 deletions examples/workloads/cluster-density/cluster-density.yml
@@ -1,8 +1,5 @@

global:
writeToFile: true
createTarball: true
metricsDirectory: collected-metrics
indexerConfig:
enabled: false
esServers: [http://elastic-elk.apps.rsevilla-ocp45-ovn.perf-testing.devcluster.openshift.com]
@@ -11,7 +8,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: kube-burner

jobs:
- name: cluster-density
@@ -1,7 +1,5 @@
---
global:
writeToFile: true
metricsDirectory: collected-metrics
indexerConfig:
enabled: true
esServers: [http://elastic-elk.apps.rsevilla.kube-burner.com]
@@ -10,7 +8,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: kube-burner

jobs:
- name: deny-all-policy
@@ -1,7 +1,5 @@
---
global:
writeToFile: true
metricsDirectory: collected-metrics
indexerConfig:
enabled: true
esServers: [http://elastic-elk.apps.rsevilla.kube-burner.com]
@@ -10,7 +8,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: kube-burner

jobs:
- name: kubelet-density-cni
@@ -1,7 +1,5 @@
---
global:
writeToFile: true
metricsDirectory: collected-metrics
indexerConfig:
enabled: true
esServers: [http://elastic-elk.apps.rsevilla.kube-burner.com]
@@ -10,7 +8,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: kube-burner

jobs:
- name: kubelet-density-heavy
4 changes: 0 additions & 4 deletions examples/workloads/kubelet-density/kubelet-density.yml
@@ -1,8 +1,5 @@
---
global:
writeToFile: true
metricsDirectory: collected-metrics
createTarball: true
indexerConfig:
enabled: false
esServers: [http://elastic-elk.apps.rsevilla.kube-burner.com]
@@ -11,7 +8,6 @@ global:
type: elastic
measurements:
- name: podLatency
esIndex: kube-burner
thresholds:
- conditionType: Ready
metric: Avg
4 changes: 0 additions & 4 deletions examples/workloads/kubevirt-density/kubevirt-density.yml
@@ -1,11 +1,7 @@
global:
# collect metrics from Prometheus and dump to files
writeToFile: true
createTarball: true
metricsDirectory: collected-metrics
measurements:
- name: vmiLatency
esIndex: kube-burner

# save metrics to database (e.g., elasticsearch)
indexerConfig:
5 changes: 3 additions & 2 deletions pkg/alerting/alert_manager.go
@@ -110,6 +110,7 @@ func (a *AlertManager) readProfile(alertProfileCfg string) error {

// Evaluate evaluates expressions
func (a *AlertManager) Evaluate(start, end time.Time) int {
log.Infof("Evaluating alerts for prometheus: %v", a.prometheus.Endpoint)
var alertList []interface{}
elapsed := int(end.Sub(start).Minutes())
var renderedQuery bytes.Buffer
@@ -201,6 +202,6 @@ func parseMatrix(value model.Value, description string, severity severityLevel)
}

func (a *AlertManager) index(alertSet []interface{}) {
log.Info("Indexing alerts in ", a.indexName)
(*a.indexer).Index(a.indexName, alertSet)
log.Info("Indexing alerts")
(*a.indexer).Index(alertSet, indexers.IndexingOpts{MetricName: alertMetricName})
}
14 changes: 5 additions & 9 deletions pkg/burner/job.go
@@ -22,12 +22,12 @@ import (

"github.com/cloud-bulldozer/kube-burner/log"
"github.com/cloud-bulldozer/kube-burner/pkg/alerting"
"github.com/cloud-bulldozer/kube-burner/pkg/commons"
"github.com/cloud-bulldozer/kube-burner/pkg/config"
"github.com/cloud-bulldozer/kube-burner/pkg/indexers"
"github.com/cloud-bulldozer/kube-burner/pkg/measurements"
"github.com/cloud-bulldozer/kube-burner/pkg/prometheus"
"github.com/cloud-bulldozer/kube-burner/pkg/util"
"github.com/cloud-bulldozer/kube-burner/pkg/util/metrics"
"github.com/cloud-bulldozer/kube-burner/pkg/version"
"golang.org/x/time/rate"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -166,10 +166,7 @@ func Run(configSpec config.Spec, uuid string, prometheusClients []*prometheus.Pr
if configSpec.GlobalConfig.IndexerConfig.Enabled {
for _, job := range jobList {
elapsedTime := job.End.Sub(job.Start).Seconds()
err := indexjobSummaryInfo(configSpec, indexer, uuid, elapsedTime, job.Config, job.Start, metadata)
if err != nil {
log.Errorf(err.Error())
}
indexjobSummaryInfo(indexer, uuid, elapsedTime, job.Config, job.Start, metadata)
}
}
// Update end time of last job
@@ -181,16 +178,15 @@
for idx, prometheusClient := range prometheusClients {
// If alertManager is configured
if alertMs[idx] != nil {
log.Infof("Evaluating alerts for prometheus - %v", prometheusClient.ConfigSpec.GlobalConfig.PrometheusURL)
if alertMs[idx].Evaluate(jobList[0].Start, jobList[len(jobList)-1].End) == 1 {
innerRC = 1
}
}
prometheusClient.JobList = prometheusJobList
// If prometheus is enabled query metrics from the start of the first job to the end of the last one
if configSpec.GlobalConfig.IndexerConfig.Enabled || configSpec.GlobalConfig.WriteToFile {
commons.ScrapeMetrics(prometheusClient, indexer)
commons.HandleTarball(configSpec)
if configSpec.GlobalConfig.IndexerConfig.Enabled {
metrics.ScrapeMetrics(prometheusClient, indexer)
metrics.HandleTarball(configSpec)
}
}
log.Infof("Finished execution with UUID: %s", uuid)