105 changes: 41 additions & 64 deletions collector/collector.go
@@ -30,7 +30,7 @@ var (
     exporterName = "exporter"
 )

-// ScrapResult is container structure for error handling
+// ScrapeResult is container structure for error handling
 type ScrapeResult struct {
     Err    error
     Metric Metric
@@ -433,7 +433,10 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
         // Construct labels value
         labelsValues := []string{}
         for _, label := range m.Labels {
-            labelsValues = append(labelsValues, row[label])
+            // Don't include FieldToAppend in the label values
+            if label != m.FieldToAppend {
+                labelsValues = append(labelsValues, row[label])
+            }
         }
         // Construct Prometheus values to sent back
         for metric, metricHelp := range m.MetricsDesc {
@@ -444,75 +447,42 @@
             }
             e.logger.Debug("Query result",
                 "value", value)
-            // If metric do not use a field content in metric's name
-            if strings.Compare(m.FieldToAppend, "") == 0 {
-                desc := prometheus.NewDesc(
-                    prometheus.BuildFQName(namespace, m.Context, metric),
-                    metricHelp,
-                    m.Labels,
-                    constLabels,
-                )
-                if m.MetricsType[strings.ToLower(metric)] == "histogram" {
-                    count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
-                    if err != nil {
-                        e.logger.Error("Unable to convert count value to int (metric=" + metric +
-                            ",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
-                        continue
-                    }
-                    buckets := make(map[float64]uint64)
-                    for field, le := range m.MetricsBuckets[metric] {
-                        lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
-                        if err != nil {
-                            e.logger.Error("Unable to convert bucket limit value to float (metric=" + metric +
-                                ",metricHelp=" + metricHelp + ",bucketlimit=<" + le + ">)")
-                            continue
-                        }
-                        counter, err := strconv.ParseUint(strings.TrimSpace(row[field]), 10, 64)
-                        if err != nil {
-                            e.logger.Error("Unable to convert ", field, " value to int (metric="+metric+
-                                ",metricHelp="+metricHelp+",value=<"+row[field]+">)")
-                            continue
-                        }
-                        buckets[lelimit] = counter
-                    }
-                    d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...))
-                } else {
-                    d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value, labelsValues...))
-                }
-                // If no labels, use metric name
-            } else {
-                desc := prometheus.NewDesc(
-                    prometheus.BuildFQName(namespace, m.Context, cleanName(row[m.FieldToAppend])),
-                    metricHelp,
-                    nil, constLabels,
-                )
-                if m.MetricsType[strings.ToLower(metric)] == "histogram" {
-                    count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
-                    if err != nil {
-                        e.logger.Error("Unable to convert count value to int (metric=" + metric +
-                            ",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
-                        continue
-                    }
-                    buckets := make(map[float64]uint64)
-                    for field, le := range m.MetricsBuckets[metric] {
-                        lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
-                        if err != nil {
-                            e.logger.Error("Unable to convert bucket limit value to float (metric=" + metric +
-                                ",metricHelp=" + metricHelp + ",bucketlimit=<" + le + ">)")
-                            continue
-                        }
-                        counter, err := strconv.ParseUint(strings.TrimSpace(row[field]), 10, 64)
-                        if err != nil {
-                            e.logger.Error("Unable to convert ", field, " value to int (metric="+metric+
-                                ",metricHelp="+metricHelp+",value=<"+row[field]+">)")
-                            continue
-                        }
-                        buckets[lelimit] = counter
-                    }
-                    d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstHistogram(desc, count, value, buckets))
-                } else {
-                    d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value))
-                }
-            }
+
+            // Build metric desc
+            suffix := metricNameSuffix(row, metric, m.FieldToAppend)
+            desc := prometheus.NewDesc(
+                prometheus.BuildFQName(namespace, m.Context, suffix),
+                metricHelp,
+                m.GetLabels(),
+                constLabels,
+            )
+            // process histogram metric, then cache and send metric through channel
+            if m.MetricsType[strings.ToLower(metric)] == "histogram" {
+                count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
+                if err != nil {
+                    e.logger.Error("Unable to convert count value to int (metric=" + metric +
+                        ",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
+                    continue
+                }
+                buckets := make(map[float64]uint64)
+                for field, le := range m.MetricsBuckets[metric] {
+                    lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
+                    if err != nil {
+                        e.logger.Error("Unable to convert bucket limit value to float (metric=" + metric +
+                            ",metricHelp=" + metricHelp + ",bucketlimit=<" + le + ">)")
+                        continue
+                    }
+                    counter, err := strconv.ParseUint(strings.TrimSpace(row[field]), 10, 64)
+                    if err != nil {
+                        e.logger.Error("Unable to convert ", field, " value to int (metric="+metric+
+                            ",metricHelp="+metricHelp+",value=<"+row[field]+">)")
+                        continue
+                    }
+                    buckets[lelimit] = counter
+                }
+                d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...))
+            } else {
+                d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value, labelsValues...))
+            }
             metricsCount++
         }
@@ -611,3 +581,10 @@ func cleanName(s string) string {
     s = strings.ToLower(s)
     return s
 }
+
+func metricNameSuffix(row map[string]string, metric, fieldToAppend string) string {
+    if len(fieldToAppend) == 0 {
+        return metric
+    }
+    return cleanName(row[fieldToAppend])
+}
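
For reference, a minimal, runnable sketch of what the new `metricNameSuffix` helper does. The `cleanName` stand-in below is simplified (the real function in collector.go does more sanitization than lowercasing and replacing spaces), and the sample row and field names are hypothetical:

```go
package main

import (
    "fmt"
    "strings"
)

// Simplified stand-in for the exporter's cleanName; the real function
// applies more sanitization than lowercasing and replacing spaces.
func cleanName(s string) string {
    s = strings.ReplaceAll(s, " ", "_")
    return strings.ToLower(s)
}

// Mirrors the metricNameSuffix helper added in this PR.
func metricNameSuffix(row map[string]string, metric, fieldToAppend string) string {
    if len(fieldToAppend) == 0 {
        return metric
    }
    return cleanName(row[fieldToAppend])
}

func main() {
    // Hypothetical result row for a wait_time-style metric.
    row := map[string]string{"wait_class": "User IO", "time_waited_sec_total": "42"}

    // No fieldtoappend: the metric key itself ends the FQN.
    fmt.Println(metricNameSuffix(row, "time_waited_sec_total", "")) // time_waited_sec_total

    // fieldtoappend = "wait_class": the cleaned field value ends the FQN instead.
    fmt.Println(metricNameSuffix(row, "time_waited_sec_total", "wait_class")) // user_io
}
```

Because the appended field becomes part of the metric name, the same field is now also excluded from the label set (see `GetLabels` in collector/metrics.go below).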
15 changes: 15 additions & 0 deletions collector/metrics.go
@@ -77,3 +77,18 @@ func (e *Exporter) parseFloat(metric, metricHelp string, row map[string]string)
     }
     return valueFloat, true
 }
+
+func (m *Metric) GetLabels() []string {
+    if len(m.FieldToAppend) == 0 {
+        return m.Labels
+    }
+    // Do not include FieldToAppend in metric labels,
+    // as this field is appended to the metric FQDN.
+    var labels []string
+    for _, label := range m.Labels {
+        if label != m.FieldToAppend {
+            labels = append(labels, label)
+        }
+    }
+    return labels
+}
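
Similarly, a small sketch of the new `GetLabels` behavior, using a stripped-down stand-in for the exporter's `Metric` type (the real struct has many more fields); the label names are hypothetical:

```go
package main

import "fmt"

// Stripped-down stand-in for the exporter's Metric type, keeping only
// the two fields GetLabels touches.
type Metric struct {
    Labels        []string
    FieldToAppend string
}

// Same logic as the GetLabels method added in this PR.
func (m *Metric) GetLabels() []string {
    if len(m.FieldToAppend) == 0 {
        return m.Labels
    }
    var labels []string
    for _, label := range m.Labels {
        if label != m.FieldToAppend {
            labels = append(labels, label)
        }
    }
    return labels
}

func main() {
    // fieldtoappend is set: that field is folded into the metric name,
    // so it is dropped from the label set.
    m := Metric{Labels: []string{"wait_class", "con_id"}, FieldToAppend: "wait_class"}
    fmt.Println(m.GetLabels()) // [con_id]

    // No fieldtoappend: all configured labels are kept.
    m2 := Metric{Labels: []string{"wait_class", "con_id"}}
    fmt.Println(m2.GetLabels()) // [wait_class con_id]
}
```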
2 changes: 1 addition & 1 deletion site/docs/configuration/custom-metrics.md
@@ -34,7 +34,7 @@ Metrics files must contain a series of `[[metric]]` definitions, in TOML, or the
 | metricsdesc | Mapping between field(s) in the request and comment(s) | Dictionary of Strings | Yes | |
 | metricstype | Mapping between field(s) in the request and [Prometheus metric types](https://prometheus.io/docs/concepts/metric_types/) | Dictionary of Strings | No | |
 | metricsbuckets | Split [histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) metric types into buckets based on value ([example](https://github.com/oracle/oracle-db-appdev-monitoring/blob/main/custom-metrics-example/metric-histogram-example.toml)) | Dictionary of String dictionaries | No | |
-| fieldtoappend | Field from the request to append to the metric FQN | String | No | |
+| fieldtoappend | Field from the request to append to the metric FQN. This field will **not** be included in the metric labels. | String | No | |
 | request | Oracle database query to run for metrics scraping | String | Yes | |
 | ignorezeroresult | Whether or not an error will be printed if the request does not return any results | Boolean | No | false |
 | querytimeout | Oracle Database query timeout duration, e.g., 300ms, 0.5h | String duration | No | Value of query.timeout in seconds |
1 change: 1 addition & 0 deletions site/docs/releases/changelog.md
@@ -12,6 +12,7 @@ List of upcoming and historic changes to the exporter.
 Our current priorities are support for Exadata metrics. We expect to address these in an upcoming release.

 - Updated project dependencies.
+- Added metric label support for metrics using the `fieldtoappend` property. The default `wait_time` and `activity` use the `fieldtoappend` property.
 - Fix `wait_time` default metric to work with Oracle Database 19c.

 Thank you to the following people for their suggestions and contributions: