Fixes absent_over_time to work with all log selectors. #3143

Merged 2 commits on Jan 8, 2021
1 change: 1 addition & 0 deletions docs/sources/logql/_index.md
@@ -391,6 +391,7 @@ The first type uses log entries to compute values and supported functions for op
- `count_over_time(log-range)`: counts the entries for each log stream within the given range.
- `bytes_rate(log-range)`: calculates the number of bytes per second for each stream.
- `bytes_over_time(log-range)`: counts the amount of bytes used by each log stream for a given range.
- `absent_over_time(log-range)`: returns an empty vector if the range vector passed to it has any elements, and a 1-element vector with the value 1 if it has no elements. (`absent_over_time` is useful for alerting when no time series or log stream exists for a label combination for a certain amount of time; see the example sketched below.)

##### Log Examples

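The following is a minimal sketch of how the newly documented `absent_over_time` could be used as an alerting condition; the `job="mysql"` selector, the `error` filter, and the `1h` range are illustrative choices, not taken from the PR:

```logql
absent_over_time({job="mysql"} |= "error" [1h])
```

This yields a single sample with value 1 only when the `mysql` job produced no log line containing `error` in the last hour, and an empty result otherwise, which is exactly the shape an alert rule needs.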
2 changes: 1 addition & 1 deletion pkg/logql/functions.go
@@ -74,7 +74,7 @@ func (r rangeAggregationExpr) extractor(override *grouping) (log.SampleExtractor
	}
	// otherwise we extract metrics from the log line.
	switch r.operation {
-	case OpRangeTypeRate, OpRangeTypeCount:
+	case OpRangeTypeRate, OpRangeTypeCount, OpRangeTypeAbsent:
		return log.NewLineSampleExtractor(log.CountExtractor, stages, groups, without, noLabels)
	case OpRangeTypeBytes, OpRangeTypeBytesRate:
		return log.NewLineSampleExtractor(log.BytesExtractor, stages, groups, without, noLabels)
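The one-line change above means `absent_over_time` now reuses the same count-based line sample extractor as `rate` and `count_over_time`, so it can be evaluated over any log selector, including ones with line filters or parser stages. A sketch of such a query, mirroring the test cases added below rather than anything prescribed elsewhere:

```logql
absent_over_time( ( {job="mysql"} |="error" !="timeout" ) [10s] )
```

Before this change, `OpRangeTypeAbsent` did not appear in this switch, so building an extractor for such an expression presumably failed; adding it to the count case is what makes the query above work.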
89 changes: 89 additions & 0 deletions pkg/logql/functions_test.go
@@ -0,0 +1,89 @@
package logql

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func Test_Extractor(t *testing.T) {
	t.Parallel()
	for _, tc := range []string{
		`rate( ( {job="mysql"} |="error" !="timeout" ) [10s] )`,
		`absent_over_time( ( {job="mysql"} |="error" !="timeout" ) [10s] )`,
		`sum without(a) ( rate ( ( {job="mysql"} |="error" !="timeout" ) [10s] ) )`,
		`sum by(a) (rate( ( {job="mysql"} |="error" !="timeout" ) [10s] ) )`,
		`sum(count_over_time({job="mysql"}[5m]))`,
		`sum(count_over_time({job="mysql"} | json [5m]))`,
		`sum(count_over_time({job="mysql"} | logfmt [5m]))`,
		`sum(count_over_time({job="mysql"} | regexp "(?P<foo>foo|bar)" [5m]))`,
		`topk(10,sum(rate({region="us-east1"}[5m])) by (name))`,
		`topk by (name)(10,sum(rate({region="us-east1"}[5m])))`,
		`avg( rate( ( {job="nginx"} |= "GET" ) [10s] ) ) by (region)`,
		`avg(min_over_time({job="nginx"} |= "GET" | unwrap foo[10s])) by (region)`,
		`sum by (cluster) (count_over_time({job="mysql"}[5m]))`,
		`sum by (cluster) (count_over_time({job="mysql"}[5m])) / sum by (cluster) (count_over_time({job="postgres"}[5m])) `,
		`
sum by (cluster) (count_over_time({job="postgres"}[5m])) /
sum by (cluster) (count_over_time({job="postgres"}[5m])) /
sum by (cluster) (count_over_time({job="postgres"}[5m]))
`,
		`sum by (cluster) (count_over_time({job="mysql"}[5m])) / min(count_over_time({job="mysql"}[5m])) `,
		`sum by (job) (
count_over_time({namespace="tns"} |= "level=error"[5m])
/
count_over_time({namespace="tns"}[5m])
)`,
		`stdvar_over_time({app="foo"} |= "bar" | json | latency >= 250ms or ( status_code < 500 and status_code > 200)
| line_format "blip{{ .foo }}blop {{.status_code}}" | label_format foo=bar,status_code="buzz{{.bar}}" | unwrap foo [5m])`,
		`sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms|unwrap latency [5m])`,
		`sum by (job) (
sum_over_time({namespace="tns"} |= "level=error" | json | foo=5 and bar<25ms | unwrap latency[5m])
/
count_over_time({namespace="tns"} | logfmt | label_format foo=bar[5m])
)`,
		`sum by (job) (
sum_over_time({namespace="tns"} |= "level=error" | json | foo=5 and bar<25ms | unwrap bytes(latency)[5m])
/
count_over_time({namespace="tns"} | logfmt | label_format foo=bar[5m])
)`,
		`sum by (job) (
sum_over_time(
{namespace="tns"} |= "level=error" | json | avg=5 and bar<25ms | unwrap duration(latency) [5m]
)
/
count_over_time({namespace="tns"} | logfmt | label_format foo=bar[5m])
)`,
		`sum_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms | unwrap latency | __error__!~".*" | foo >5[5m])`,
		`absent_over_time({namespace="tns"} |= "level=error" | json |foo>=5,bar<25ms | unwrap latency | __error__!~".*" | foo >5[5m])`,
		`absent_over_time({namespace="tns"} |= "level=error" | json [5m])`,
		`sum by (job) (
sum_over_time(
{namespace="tns"} |= "level=error" | json | avg=5 and bar<25ms | unwrap duration(latency) | __error__!~".*" [5m]
)
/
count_over_time({namespace="tns"} | logfmt | label_format foo=bar[5m])
)`,
		`label_replace(
sum by (job) (
sum_over_time(
{namespace="tns"} |= "level=error" | json | avg=5 and bar<25ms | unwrap duration(latency) | __error__!~".*" [5m]
)
/
count_over_time({namespace="tns"} | logfmt | label_format foo=bar[5m])
),
"foo",
"$1",
"service",
"(.*):.*"
)
`,
	} {
		t.Run(tc, func(t *testing.T) {
			expr, err := ParseSampleExpr(tc)
			require.Nil(t, err)
			_, err = expr.Extractor()
			require.Nil(t, err)
		})
	}
}