Request-level circuit breaker support on coordinating nodes #62223

Merged Sep 24, 2020 · 16 commits
Changes from 14 commits
@@ -0,0 +1,230 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.benchmark.search.aggregations;

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.QueryPhaseResultConsumer;
import org.elasticsearch.action.search.SearchPhaseController;
import org.elasticsearch.action.search.SearchProgressListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.breaker.NoopCircuitBreaker;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.query.QuerySearchResult;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;

import java.util.AbstractList;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import static java.util.Collections.emptyList;

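/**
 * JMH microbenchmark for the coordinating-node reduce of {@code terms} aggregations: it generates
 * random per-shard terms results and measures how long the {@link QueryPhaseResultConsumer} takes
 * to merge them into a single reduced query phase.
 */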
@Warmup(iterations = 5)
@Measurement(iterations = 7)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Thread)
@Fork(value = 1)
public class TermsReduceBenchmark {
    private final SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
    private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
    private final SearchPhaseController controller = new SearchPhaseController(
        namedWriteableRegistry,
        req -> new InternalAggregation.ReduceContextBuilder() {
            @Override
            public InternalAggregation.ReduceContext forPartialReduction() {
                return InternalAggregation.ReduceContext.forPartialReduction(null, null, () -> PipelineAggregator.PipelineTree.EMPTY);
            }

            @Override
            public InternalAggregation.ReduceContext forFinalReduction() {
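                // An effectively unbounded bucket consumer backed by the no-op breaker service: the
                // benchmark measures reduce time only, so memory accounting is deliberately disabled.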
                final MultiBucketConsumerService.MultiBucketConsumer bucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer(
                    Integer.MAX_VALUE,
                    new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST)
                );
                return InternalAggregation.ReduceContext.forFinalReduction(
                    null,
                    null,
                    bucketConsumer,
                    PipelineAggregator.PipelineTree.EMPTY
                );
            }
        }
    );

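    /**
     * Benchmark state: the randomly generated per-shard terms aggregations that get reduced.
     */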
    @State(Scope.Benchmark)
    public static class TermsList extends AbstractList<InternalAggregations> {
        @Param({ "1600172297" })
        long seed;

        @Param({ "64", "128", "512" })
        int numShards;

        @Param({ "100" })
        int topNSize;

        @Param({ "1", "10", "100" })
        int cardinalityFactor;

        List<InternalAggregations> aggsList;

        @Setup
        public void setup() {
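            // Build a shared dictionary of cardinalityFactor * topNSize random terms, then derive one
            // terms aggregation per shard from it, so shards share keys and the reduce has merging work to do.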
            this.aggsList = new ArrayList<>();
            Random rand = new Random(seed);
            int cardinality = cardinalityFactor * topNSize;
            BytesRef[] dict = new BytesRef[cardinality];
            for (int i = 0; i < dict.length; i++) {
                dict[i] = new BytesRef(Long.toString(rand.nextLong()));
            }
            for (int i = 0; i < numShards; i++) {
                aggsList.add(InternalAggregations.from(Collections.singletonList(newTerms(rand, dict, true))));
            }
        }

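        /**
         * Builds a random {@link StringTerms} aggregation with {@code topNSize} buckets drawn from the
         * dictionary, optionally nesting a second level of terms under each bucket.
         */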
        private StringTerms newTerms(Random rand, BytesRef[] dict, boolean withNested) {
            Set<BytesRef> randomTerms = new HashSet<>();
            for (int i = 0; i < topNSize; i++) {
                randomTerms.add(dict[rand.nextInt(dict.length)]);
            }
            List<StringTerms.Bucket> buckets = new ArrayList<>();
            for (BytesRef term : randomTerms) {
                InternalAggregations subAggs;
                if (withNested) {
                    subAggs = InternalAggregations.from(Collections.singletonList(newTerms(rand, dict, false)));
                } else {
                    subAggs = InternalAggregations.EMPTY;
                }
                buckets.add(new StringTerms.Bucket(term, rand.nextInt(10000), subAggs, true, 0L, DocValueFormat.RAW));
            }

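            // The aggregation is declared below with BucketOrder.key(true), so buckets must be sorted by key.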
            Collections.sort(buckets, (a, b) -> a.compareKey(b));
            return new StringTerms(
                "terms",
                BucketOrder.key(true),
                BucketOrder.count(false),
                topNSize,
                1,
                Collections.emptyMap(),
                DocValueFormat.RAW,
                numShards,
                true,
                0,
                buckets,
                0
            );
        }

        @Override
        public InternalAggregations get(int index) {
            return aggsList.get(index);
        }

        @Override
        public int size() {
            return aggsList.size();
        }
    }

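    // Number of shard results buffered on the coordinating node before a partial reduce kicks in.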
    @Param({ "32", "512" })
    private int bufferSize;

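    /**
     * Feeds the generated shard results into a {@link QueryPhaseResultConsumer}, which partially
     * reduces them every {@code bufferSize} results, then performs and returns the final reduce.
     */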
    @Benchmark
    public SearchPhaseController.ReducedQueryPhase reduceAggs(TermsList candidateList) throws Exception {
        List<QuerySearchResult> shards = new ArrayList<>();
        for (int i = 0; i < candidateList.size(); i++) {
            QuerySearchResult result = new QuerySearchResult();
            result.setShardIndex(i);
            result.from(0);
            result.size(0);
            result.topDocs(
                new TopDocsAndMaxScore(
                    new TopDocs(new TotalHits(1000, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), new ScoreDoc[0]),
                    Float.NaN
                ),
                new DocValueFormat[] { DocValueFormat.RAW }
            );
            result.aggregations(candidateList.get(i));
            result.setSearchShardTarget(
                new SearchShardTarget("node", new ShardId(new Index("index", "index"), i), null, OriginalIndices.NONE)
            );
            shards.add(result);
        }
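        // A no-op REQUEST breaker keeps memory accounting out of the measurement; partial reduces
        // still run on the single-threaded executor as buffered results arrive.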
        SearchRequest request = new SearchRequest();
        request.source(new SearchSourceBuilder().size(0).aggregation(AggregationBuilders.terms("test")));
        request.setBatchedReduceSize(bufferSize);
        ExecutorService executor = Executors.newFixedThreadPool(1);
        QueryPhaseResultConsumer consumer = new QueryPhaseResultConsumer(
            request,
            executor,
            new NoopCircuitBreaker(CircuitBreaker.REQUEST),
            controller,
            SearchProgressListener.NOOP,
            namedWriteableRegistry,
            shards.size(),
            exc -> {}
        );
        CountDownLatch latch = new CountDownLatch(shards.size());
        for (int i = 0; i < shards.size(); i++) {
            consumer.consumeResult(shards.get(i), () -> latch.countDown());
        }
        latch.await();
        SearchPhaseController.ReducedQueryPhase phase = consumer.reduce();
        executor.shutdownNow();
        return phase;
    }
}
@@ -23,7 +23,6 @@
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.common.io.stream.DelayableWriteable;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
@@ -174,8 +173,7 @@ public void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Except
             }

             @Override
-            public void onPartialReduce(List<SearchShard> shards, TotalHits totalHits,
-                                        DelayableWriteable.Serialized<InternalAggregations> aggs, int reducePhase) {
+            public void onPartialReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) {
                 numReduces.incrementAndGet();
             }
