Fix sneaky date_histogram bug (elastic#65707)
`date_histogram` has a bug with `offset` and `extended_bounds` when it
needs to create an "empty" aggregation result: it applies the offset
twice! Wooops!

I broke this a while back when I started trying to merge `offset` into
`Rounding`. I never finished that merge, sadly. Interestingly, we've
since discovered that the merge is required to properly handle daylight
saving time (elastic#56305), but that isn't something we're looking to
solve today. For now, this change just stops counting the offset twice.

Closes elastic#65624
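
To make the double-counting concrete, here is a minimal standalone sketch of the arithmetic (the helpers are hypothetical stand-ins, not Elasticsearch's actual `Rounding` internals): with a one-day interval and a 1h offset, bucket keys should land at 01:00 past the day boundary, but adding the offset on top of a rounding that already contains it lands them at 02:00.

// Minimal sketch of the double-counted offset. These helpers are
// hypothetical stand-ins, not the real Rounding class.
public class OffsetTwiceSketch {
    static final long DAY = 24 * 60 * 60 * 1000L;
    static final long HOUR = 60 * 60 * 1000L;

    // Floor-rounding onto a grid that already includes the offset.
    static long roundWithOffset(long t, long interval, long offset) {
        return Math.floorDiv(t - offset, interval) * interval + offset;
    }

    // Plain floor-rounding; the caller adds the offset exactly once.
    static long roundWithoutOffset(long t, long interval) {
        return Math.floorDiv(t, interval) * interval;
    }

    public static void main(String[] args) {
        long t = 0L;        // 1970-01-01T00:00:00Z
        long offset = HOUR; // like `offset: 1h` in the REST test below

        // Fixed behavior: round without the offset, then add it once.
        long fixed = roundWithoutOffset(t, DAY) + offset;

        // Buggy behavior: the stored rounding already contained the
        // offset, and the empty-bucket path added it a second time.
        long buggy = roundWithOffset(t, DAY, offset) + offset;

        // Distance of each key past its day boundary, in hours:
        System.out.println(Math.floorMod(fixed, DAY) / HOUR); // 1 -> 01:00
        System.out.println(Math.floorMod(buggy, DAY) / HOUR); // 2 -> 02:00
    }
}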
nik9000 authored and Alyosha Kaz committed Mar 9, 2021
1 parent 982cc62 commit 252a856
Showing 3 changed files with 79 additions and 1 deletion.
@@ -573,3 +573,62 @@ setup:
  - match: { aggregations.histo.buckets.0.doc_count: 1 }
  - match: { aggregations.histo.buckets.20.key: 20 }
  - match: { aggregations.histo.buckets.20.doc_count: 1 }


---
"date_histogram with extended_bounds and offset and unmapped":
- skip:
version: " - 7.99.99"
reason: fixed in 8.0 to be backported to 7.11

- do:
indices.create:
index: test_nested
body:
mappings:
properties:
foo:
type: nested

- do:
bulk:
index: test_nested
refresh: true
body:
- '{"index": {}}'
- '{"foo": {"date": "2016-01-02"}}'
- '{"index": {}}'
- '{"foo": {"date": "2016-01-03"}}'

- do:
indices.create:
index: test_unmapped

- do:
search:
index: test_*
body:
size: 0
aggs:
foo:
nested:
path: foo
aggs:
histo:
date_histogram:
field: foo.date
calendar_interval: day
offset: 1h
extended_bounds:
min: "2016-01-01"
max: "2016-01-04"
- match: { hits.total.value: 2 }
- length: { aggregations.foo.histo.buckets: 4 }
- match: { aggregations.foo.histo.buckets.0.key_as_string: 2016-01-01T01:00:00.000Z }
- match: { aggregations.foo.histo.buckets.0.doc_count: 1 }
- match: { aggregations.foo.histo.buckets.1.key_as_string: 2016-01-02T01:00:00.000Z }
- match: { aggregations.foo.histo.buckets.1.doc_count: 1 }
- match: { aggregations.foo.histo.buckets.2.key_as_string: 2016-01-03T01:00:00.000Z }
- match: { aggregations.foo.histo.buckets.2.doc_count: 0 }
- match: { aggregations.foo.histo.buckets.3.key_as_string: 2016-01-04T01:00:00.000Z }
- match: { aggregations.foo.histo.buckets.3.doc_count: 0 }
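
For reference on the expected values: with `offset: 1h`, each daily bucket runs from 01:00 to 01:00 of the next day, so the document dated 2016-01-02 falls in the bucket keyed 2016-01-01T01:00:00.000Z and the 2016-01-03 document in the next one, while `extended_bounds` pads the histogram with empty buckets out to 2016-01-04T01:00:00.000Z. Before this fix, the empty buckets created for the unmapped index carried the offset twice.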
@@ -312,7 +312,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException
    @Override
    public InternalAggregation buildEmptyAggregation() {
        InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
-           ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
+           ? new InternalDateHistogram.EmptyBucketInfo(rounding.withoutOffset(), buildEmptySubAggregations(), extendedBounds)
            : null;
        return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, rounding.offset(), emptyBucketInfo, formatter,
            keyed, metadata());
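
The one-line fix works because `InternalDateHistogram` already receives the offset separately (the `rounding.offset()` argument just below the changed line) and re-applies it when the empty buckets are materialized; keeping the offset inside the rounding stored in `EmptyBucketInfo` as well is what counted it twice, so that rounding is now stripped with `rounding.withoutOffset()`.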
@@ -1235,6 +1235,25 @@ public void testIllegalInterval() throws IOException {
        assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.");
    }

    public void testBuildEmpty() throws IOException {
        withAggregator(
            new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR).offset(10),
            new MatchAllDocsQuery(),
            iw -> {},
            (searcher, aggregator) -> {
                InternalDateHistogram histo = (InternalDateHistogram) aggregator.buildEmptyAggregation();
                /*
                 * There was a time when we included the offset in the
                 * rounding in the emptyBucketInfo, which would cause us
                 * to include the offset twice. This verifies that we
                 * don't do that.
                 */
                assertThat(histo.emptyBucketInfo.rounding.prepareForUnknown().round(0), equalTo(0L));
            },
            aggregableDateFieldType(false, true)
        );
    }
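
A note on why that single assertion is enough: the builder sets `offset(10)`, so if the offset were still baked into the rounding stored in `emptyBucketInfo`, rounding the timestamp 0 would snap onto the offset grid (the previous year's start plus 10ms) rather than return 0. Getting exactly 0 back shows the stored rounding carries no offset.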

    private void testSearchCase(Query query, List<String> dataset,
                                Consumer<DateHistogramAggregationBuilder> configure,
                                Consumer<InternalDateHistogram> verify, boolean useNanosecondResolution) throws IOException {
