Merge pull request #84 from topolvm/csa-apps-640-add-metrics
add metrics
cupnes committed Aug 17, 2023
2 parents 9490588 + a6b6ed0 commit 276db65
Showing 9 changed files with 87 additions and 17 deletions.
5 changes: 5 additions & 0 deletions README.md
@@ -47,6 +47,11 @@ The number of attempts that the creation of the Pod object and the creation of t

TYPE: counter

### `pie_performance_probe_total`
The number of performance tests run on a probe container.

TYPE: counter
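
For illustration only (not part of the README change), a scrape of the exporter might include series like the following; the label set (`node`, `storage_class`, `succeed`) comes from this commit's metrics.go, while the node name and counter values are made up:

```
# HELP pie_performance_probe_total The number of performance tests on a probe container.
# TYPE pie_performance_probe_total counter
pie_performance_probe_total{node="worker-1",storage_class="standard",succeed="true"} 42
pie_performance_probe_total{node="worker-1",storage_class="standard",succeed="false"} 1
```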

## Contributing

### Test It Out
16 changes: 16 additions & 0 deletions e2e/suite_test.go
@@ -136,6 +136,9 @@ var _ = Describe("pie", func() {
onTimeTrueLabelPair := io_prometheus_client.LabelPair{Name: &onTimeLabelKey, Value: &trueValue}
onTimeFalseLabelPair := io_prometheus_client.LabelPair{Name: &onTimeLabelKey, Value: &falseValue}

succeedLabelKey := "succeed"
succeedTrueLabelPair := io_prometheus_client.LabelPair{Name: &succeedLabelKey, Value: &trueValue}

Eventually(func(g Gomega) {
resp, err := http.Get("http://localhost:8080/metrics")
g.Expect(err).NotTo(HaveOccurred())
@@ -172,6 +175,19 @@ var _ = Describe("pie", func() {
}
}
}

By("checking pie_performance_probe_total with succeed=true is more than zero for standard SC")
g.Expect("pie_performance_probe_total").Should(BeKeyOf(metricFamilies))
metrics = metricFamilies["pie_performance_probe_total"].Metric
g.Expect(metrics).Should(ContainElement(HaveField("Label", ContainElement(&standardSCLabelPair))))
for _, metric := range metrics {
for _, label := range metric.Label {
if reflect.DeepEqual(label, &standardSCLabelPair) {
g.Expect(metric.Label).Should(ContainElement(&succeedTrueLabelPair))
g.Expect(metric.Counter).ShouldNot(BeZero())
}
}
}
}).Should(Succeed())

})
26 changes: 23 additions & 3 deletions metrics/metrics.go
@@ -13,12 +13,14 @@ const (
type MetricsExporter interface {
SetLatency(node, storageClass string, readLatency, writeLatency float64)
IncrementCreateProbeCount(node string, storageClass string, onTime bool)
IncrementPerformanceProbeCount(node string, storageClass string, succeed bool)
}

type metricExporterImpl struct {
- writeLatencyGauge *prometheus.GaugeVec
- readLatencyGauge  *prometheus.GaugeVec
- createProbeCount  *prometheus.CounterVec
+ writeLatencyGauge     *prometheus.GaugeVec
+ readLatencyGauge      *prometheus.GaugeVec
+ createProbeCount      *prometheus.CounterVec
+ performanceProbeCount *prometheus.CounterVec
}

func NewMetrics() MetricsExporter {
@@ -57,6 +59,16 @@ func (m *metricExporterImpl) registerMetrics() {
[]string{"node", "storage_class", "on_time"})

metrics.Registry.MustRegister(m.createProbeCount)

m.performanceProbeCount = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "pie",
Name: "performance_probe_total",
Help: "The number of performance tests on a probe container.",
},
[]string{"node", "storage_class", "succeed"})

metrics.Registry.MustRegister(m.performanceProbeCount)
}

func (m *metricExporterImpl) SetLatency(node string, storageClass string, readLatency, writeLatency float64) {
@@ -71,3 +83,11 @@ func (m *metricExporterImpl) IncrementCreateProbeCount(node string, storageClass
}
m.createProbeCount.WithLabelValues(node, storageClass, onTimeStr).Inc()
}

func (m *metricExporterImpl) IncrementPerformanceProbeCount(node string, storageClass string, succeed bool) {
succeedStr := "false"
if succeed {
succeedStr = "true"
}
m.performanceProbeCount.WithLabelValues(node, storageClass, succeedStr).Inc()
}
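
As a rough usage sketch (not part of this commit), a component holding the `MetricsExporter` could record probe results like this; the module import path and the node/StorageClass values are assumptions:

```go
package main

// Sketch only: recording performance-probe results through the
// MetricsExporter interface added in metrics/metrics.go.
// The import path and the label values below are placeholders.
import "github.com/topolvm/pie/metrics"

func main() {
	exporter := metrics.NewMetrics()

	// One successful performance probe for this node/StorageClass pair.
	exporter.IncrementPerformanceProbeCount("worker-1", "standard", true)

	// A failed probe is counted on a separate series with succeed="false".
	exporter.IncrementPerformanceProbeCount("worker-1", "standard", false)
}
```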
1 change: 1 addition & 0 deletions metrics/receiver.go
@@ -28,6 +28,7 @@ func (rh *receiver) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}

rh.metrics.SetLatency(receivedData.Node, receivedData.StorageClass, receivedData.ReadLatency, receivedData.WriteLatency)
rh.metrics.IncrementPerformanceProbeCount(receivedData.Node, receivedData.StorageClass, receivedData.PerformanceProbeSucceed)

fmt.Fprintf(w, "OK")
}
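
A minimal sketch of the sending side of this exchange (the real client is probe/exporter.go): marshal a `types.MetricsExchangeFormat` and POST it to the receiver, whose handler above forwards the values to `SetLatency` and `IncrementPerformanceProbeCount`. The import path, receiver URL, and field values are assumptions:

```go
package main

// Sketch only: posting a MetricsExchangeFormat payload to the metrics
// receiver, roughly what probe/exporter.go's Export does. The URL,
// import path, and values are placeholders.
import (
	"bytes"
	"encoding/json"
	"net/http"

	"github.com/topolvm/pie/types"
)

func main() {
	m := types.MetricsExchangeFormat{
		Node:                    "worker-1",
		StorageClass:            "standard",
		WriteLatency:            0.0012,
		ReadLatency:             0.0008,
		PerformanceProbeSucceed: true,
	}

	body, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}

	// The receiver's ServeHTTP decodes this JSON and updates the metrics.
	resp, err := http.Post("http://pie-receiver:8080/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
```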
31 changes: 28 additions & 3 deletions probe/diskmetrics.go
@@ -32,7 +32,7 @@ func execWrap(stdin []byte, command string, args ...string) ([]byte, error) {
return stdoutBuf.Bytes(), nil
}

- func parseFioResult(fioOutput []byte, property string) (float64, error) {
+ func parseFioLatency(fioOutput []byte, property string) (float64, error) {
jqOut, err := execWrap(
fioOutput,
"jq",
@@ -53,6 +53,26 @@ return actualNumber / 1_000_000_000, nil
return actualNumber / 1_000_000_000, nil
}

func parseFioError(fioOutput []byte) (int, error) {
jqOut, err := execWrap(
fioOutput,
"jq",
".jobs[0].error",
)
if err != nil {
return 0, err
}

stringJqOut := string(jqOut)

actualNumber, err := strconv.ParseInt(stringJqOut[0:len(stringJqOut)-1], 0, 0)
if err != nil {
return 0, err
}

return int(actualNumber), nil
}
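
For context, `.jobs[0].error` in fio's JSON output is the per-job error code, with 0 meaning the job finished successfully; `parseFioError` feeds that value into `ErrorNumber`, which the exporter later turns into `performance_probe_succeed`. Below is a minimal sketch of the same extraction using `encoding/json` instead of jq, against an abridged, made-up fio document:

```go
package main

// Sketch only: what parseFioError pulls out of fio's JSON output.
// fio reports a per-job error code at .jobs[0].error; 0 means success.
// The sample document below is abridged and illustrative.
import (
	"encoding/json"
	"fmt"
)

func main() {
	sample := []byte(`{"jobs": [{"jobname": "probe", "error": 0}]}`)

	var out struct {
		Jobs []struct {
			Error int `json:"error"`
		} `json:"jobs"`
	}
	if err := json.Unmarshal(sample, &out); err != nil {
		panic(err)
	}

	// Equivalent of the jq expression ".jobs[0].error" used by parseFioError.
	fmt.Println(out.Jobs[0].Error) // 0 => the performance probe succeeded
}
```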

func (mtr *diskMetricsImpl) GetMetrics(ctx context.Context) (*DiskMetrics, error) {
fioStdout, err := execWrap(
nil,
@@ -73,12 +93,17 @@ func (mtr *diskMetricsImpl) GetMetrics(ctx context.Context) (*DiskMetrics, error
}

var metrics DiskMetrics
- metrics.ReadLatency, err = parseFioResult(fioStdout, "read")
+ metrics.ReadLatency, err = parseFioLatency(fioStdout, "read")
if err != nil {
return nil, err
}

metrics.WriteLatency, err = parseFioLatency(fioStdout, "write")
if err != nil {
return nil, err
}

- metrics.WriteLatency, err = parseFioResult(fioStdout, "write")
+ metrics.ErrorNumber, err = parseFioError(fioStdout)
if err != nil {
return nil, err
}
11 changes: 6 additions & 5 deletions probe/exporter.go
@@ -28,12 +28,13 @@ func NewDiskInfoExporter(url string, node string, storageClass string) DiskInfoE
}
}

- func (di *diskInfoImpl) Export(metrics DiskMetrics) error {
+ func (di *diskInfoImpl) Export(metrics *DiskMetrics) error {
m := types.MetricsExchangeFormat{
- Node:         di.node,
- StorageClass: di.storageClass,
- WriteLatency: metrics.WriteLatency,
- ReadLatency:  metrics.ReadLatency,
+ Node:                    di.node,
+ StorageClass:            di.storageClass,
+ WriteLatency:            metrics.WriteLatency,
+ ReadLatency:             metrics.ReadLatency,
+ PerformanceProbeSucceed: metrics.ErrorNumber == 0,
}

s, err := json.Marshal(m)
2 changes: 1 addition & 1 deletion probe/probe.go
@@ -14,7 +14,7 @@ func SubMain(node string, measurePath string, storageClass string, serverURI str
return err
}

- err = infoExporter.Export(*metrics)
+ err = infoExporter.Export(metrics)
if err != nil {
return err
}
3 changes: 2 additions & 1 deletion probe/types.go
@@ -5,12 +5,13 @@ import "context"
type DiskMetrics struct {
ReadLatency float64
WriteLatency float64
ErrorNumber int
}

type DiskMetricsInterface interface {
GetMetrics(ctx context.Context) (*DiskMetrics, error)
}

type DiskInfoExporter interface {
- Export(metrics DiskMetrics) error
+ Export(metrics *DiskMetrics) error
}
9 changes: 5 additions & 4 deletions types/types.go
@@ -1,8 +1,9 @@
package types

type MetricsExchangeFormat struct {
- Node         string  `json:"node"`
- StorageClass string  `json:"storage_class"`
- WriteLatency float64 `json:"write_latency"`
- ReadLatency  float64 `json:"read_latency"`
+ Node                    string  `json:"node"`
+ StorageClass            string  `json:"storage_class"`
+ WriteLatency            float64 `json:"write_latency"`
+ ReadLatency             float64 `json:"read_latency"`
+ PerformanceProbeSucceed bool    `json:"performance_probe_succeed"`
}
