@@ -13,15 +13,12 @@
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.elasticsearch.search.aggregations.BucketOrder;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.junit.After;
-import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -53,18 +50,6 @@ public static String randomExecutionHint() {
 
     private static int numRoutingValues;
 
-    @Before
-    public void disableBatchedExecution() {
-        // TODO: it's practically impossible to get a 100% deterministic test with batched execution unfortunately, adjust this test to
-        // still do something useful with batched execution (i.e. use somewhat relaxed assertions)
-        updateClusterSettings(Settings.builder().put(SearchService.BATCHED_QUERY_PHASE.getKey(), false));
-    }
-
-    @After
-    public void resetSettings() {
-        updateClusterSettings(Settings.builder().putNull(SearchService.BATCHED_QUERY_PHASE.getKey()));
-    }
-
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("idx").setMapping(STRING_FIELD_NAME, "type=keyword").get());
@@ -221,6 +221,7 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception {
             batchedResults = this.batchedResults;
         }
         final int resultSize = buffer.size() + (mergeResult == null ? 0 : 1) + batchedResults.size();
+        final boolean hasBatchedResults = batchedResults.isEmpty() == false;
         final List<TopDocs> topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null;
         final Deque<DelayableWriteable<InternalAggregations>> aggsList = hasAggs ? new ArrayDeque<>(resultSize) : null;
 
@@ -252,6 +253,10 @@ public SearchPhaseController.ReducedQueryPhase reduce() throws Exception {
         if (aggsList != null) {
             // Add an estimate of the final reduce size
             breakerSize = addEstimateAndMaybeBreak(estimateRamBytesUsedForReduce(circuitBreakerBytes));
+            AggregationReduceContext aggReduceContext = performFinalReduce
+                ? aggReduceContextBuilder.forFinalReduction()
+                : aggReduceContextBuilder.forPartialReduction();
+            aggReduceContext.setHasBatchedResult(hasBatchedResults);
             aggs = aggregate(buffer.iterator(), new Iterator<>() {
                 @Override
                 public boolean hasNext() {
@@ -262,10 +267,7 @@ public boolean hasNext() {
                 public DelayableWriteable<InternalAggregations> next() {
                     return aggsList.pollFirst();
                 }
-            },
-            resultSize,
-            performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
-            );
+            }, resultSize, aggReduceContext);
         } else {
             aggs = null;
         }
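The consumer change above hoists the reduce-context construction out of the aggregate(...) call so that the new hasBatchedResults flag can be attached before any reduction runs. A condensed sketch of that handshake, with the surrounding consumer code elided (the identifiers mirror the diff; this is a fragment, not the full reduce() method):

    // Build the context first, then flag it when batched (already partially
    // reduced) node-level results participate in this reduce step.
    AggregationReduceContext aggReduceContext = performFinalReduce
        ? aggReduceContextBuilder.forFinalReduction()
        : aggReduceContextBuilder.forPartialReduction();
    aggReduceContext.setHasBatchedResult(batchedResults.isEmpty() == false);
    // ... later: aggregate(buffer.iterator(), aggsIterator, resultSize, aggReduceContext)

Setting the flag before aggregate(...) runs matters because the terms reduction below consults hasBatchedResult() while the reduce is in progress.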
@@ -47,6 +47,7 @@ public interface Builder {
     @Nullable
     private final AggregationBuilder builder;
     private final AggregatorFactories.Builder subBuilders;
+    private boolean hasBatchedResult;
 
     private AggregationReduceContext(
         BigArrays bigArrays,
@@ -136,6 +137,14 @@ public final AggregationReduceContext forAgg(String name) {
 
     protected abstract AggregationReduceContext forSubAgg(AggregationBuilder sub);
 
+    public boolean hasBatchedResult() {
+        return hasBatchedResult;
+    }
+
+    public void setHasBatchedResult(boolean hasBatchedResult) {
+        this.hasBatchedResult = hasBatchedResult;
+    }
+
     /**
      * A {@linkplain AggregationReduceContext} to perform a partial reduction.
      */
@@ -234,7 +243,9 @@ public PipelineTree pipelineTreeRoot() {
 
         @Override
         protected AggregationReduceContext forSubAgg(AggregationBuilder sub) {
-            return new ForFinal(bigArrays(), scriptService(), isCanceled(), sub, multiBucketConsumer, pipelineTreeRoot);
+            ForFinal subContext = new ForFinal(bigArrays(), scriptService(), isCanceled(), sub, multiBucketConsumer, pipelineTreeRoot);
+            subContext.setHasBatchedResult(hasBatchedResult());
+            return subContext;
         }
     }
 }
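forSubAgg previously built a fresh context for each sub-aggregation, silently dropping any state set on the parent; the added lines copy the flag down so a terms aggregation nested under another aggregation still sees it. A standalone model of that propagation contract (plain Java for illustration, not the Elasticsearch classes):

    // Minimal model: a parent reduce context must copy its batched-result
    // flag into every child context it creates for sub-aggregations.
    class ReduceContextModel {
        private boolean hasBatchedResult;

        boolean hasBatchedResult() {
            return hasBatchedResult;
        }

        void setHasBatchedResult(boolean hasBatchedResult) {
            this.hasBatchedResult = hasBatchedResult;
        }

        ReduceContextModel forSubAgg() {
            ReduceContextModel sub = new ReduceContextModel();
            sub.setHasBatchedResult(hasBatchedResult()); // the fix: carry the flag down
            return sub;
        }
    }

Without that copy, setHasBatchedResult(true) on the root context would be invisible one level down, and a nested terms reduction would wrongly zero its doc count error.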
@@ -332,7 +332,10 @@ public InternalAggregation get() {
             }
             long docCountError = -1;
             if (sumDocCountError != -1) {
-                docCountError = size == 1 ? 0 : sumDocCountError;
+                // If we are reducing only one aggregation (size == 1), the doc count error should be 0.
+                // However, the presence of a batched query result implies a partial reduction with size > 1
+                // has already occurred on a data node. The doc count error should not be 0 in this case.
+                docCountError = size == 1 && reduceContext.hasBatchedResult() == false ? 0 : sumDocCountError;
             }
             return create(name, result, reduceContext.isFinalReduce() ? getOrder() : thisReduceOrder, docCountError, otherDocCount);
         }
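A worked illustration of the guard, with assumed numbers: suppose a data node partially reduces several shards and ships one batched result whose accumulated doc count error is 12. At the coordinator size == 1, so the old expression reset the error to 0 even though a multi-shard reduction had already happened:

    public class DocCountErrorDemo {
        public static void main(String[] args) {
            int size = 1;               // the coordinator sees a single (batched) result
            long sumDocCountError = 12; // error accumulated during the data-node partial reduce
            boolean hasBatchedResult = true;

            long oldError = size == 1 ? 0 : sumDocCountError;                              // 0: real error discarded
            long newError = size == 1 && hasBatchedResult == false ? 0 : sumDocCountError; // 12: error preserved
            System.out.println(oldError + " -> " + newError);                              // prints "0 -> 12"
        }
    }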