Remove hppc from dfs search (#84688)
This commit removes the HppcMaps class and converts the few existing uses in DFS code to use Map.
rjernst committed Mar 7, 2022
1 parent 2cf203a commit df5dbd5
Showing 6 changed files with 44 additions and 268 deletions.
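
The conversion is mechanical throughout: HPPC's ObjectObjectHashMap, iterated with ObjectObjectCursor through c.key and c.value, becomes a plain java.util.Map iterated through entrySet(). A minimal, self-contained sketch of the pattern (the map contents and class name here are illustrative, not taken from the commit):

import java.util.HashMap;
import java.util.Map;

public class HppcToMapSketch {
    public static void main(String[] args) {
        // Before: ObjectObjectHashMap<String, Long> counts = new ObjectObjectHashMap<>();
        Map<String, Long> counts = new HashMap<>();
        counts.put("title", 3L);
        counts.put("body", 7L);

        // Before: for (ObjectObjectCursor<String, Long> c : counts) { ... c.key ... c.value ... }
        for (Map.Entry<String, Long> entry : counts.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}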
server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java

@@ -9,7 +9,6 @@
 package org.elasticsearch.action.search;
 
 import com.carrotsearch.hppc.IntArrayList;
-import com.carrotsearch.hppc.ObjectObjectHashMap;
 
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.CollectionStatistics;
@@ -23,7 +22,6 @@
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.search.TotalHits.Relation;
 import org.elasticsearch.common.breaker.CircuitBreaker;
-import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.lucene.grouping.TopFieldGroups;
@@ -73,8 +71,8 @@ public SearchPhaseController(
     }
 
     public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
-        ObjectObjectHashMap<Term, TermStatistics> termStatistics = HppcMaps.newNoNullKeysMap();
-        ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
+        Map<Term, TermStatistics> termStatistics = new HashMap<>();
+        Map<String, CollectionStatistics> fieldStatistics = new HashMap<>();
         long aggMaxDoc = 0;
         for (DfsSearchResult lEntry : results) {
            final Term[] terms = lEntry.terms();
@@ -103,29 +101,25 @@ public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) {
             }
 
             assert lEntry.fieldStatistics().containsKey(null) == false;
-            final Object[] keys = lEntry.fieldStatistics().keys;
-            final Object[] values = lEntry.fieldStatistics().values;
-            for (int i = 0; i < keys.length; i++) {
-                if (keys[i] != null) {
-                    String key = (String) keys[i];
-                    CollectionStatistics value = (CollectionStatistics) values[i];
-                    if (value == null) {
-                        continue;
-                    }
-                    assert key != null;
-                    CollectionStatistics existing = fieldStatistics.get(key);
-                    if (existing != null) {
-                        CollectionStatistics merged = new CollectionStatistics(
-                            key,
-                            existing.maxDoc() + value.maxDoc(),
-                            existing.docCount() + value.docCount(),
-                            existing.sumTotalTermFreq() + value.sumTotalTermFreq(),
-                            existing.sumDocFreq() + value.sumDocFreq()
-                        );
-                        fieldStatistics.put(key, merged);
-                    } else {
-                        fieldStatistics.put(key, value);
-                    }
-                }
-            }
+            for (var entry : lEntry.fieldStatistics().entrySet()) {
+                String key = entry.getKey();
+                CollectionStatistics value = entry.getValue();
+                if (value == null) {
+                    continue;
+                }
+                assert key != null;
+                CollectionStatistics existing = fieldStatistics.get(key);
+                if (existing != null) {
+                    CollectionStatistics merged = new CollectionStatistics(
+                        key,
+                        existing.maxDoc() + value.maxDoc(),
+                        existing.docCount() + value.docCount(),
+                        existing.sumTotalTermFreq() + value.sumTotalTermFreq(),
+                        existing.sumDocFreq() + value.sumDocFreq()
+                    );
+                    fieldStatistics.put(key, merged);
+                } else {
+                    fieldStatistics.put(key, value);
+                }
+            }
             aggMaxDoc += lEntry.maxDoc();
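
The merge loop above could equally be written with Map.merge, one of the options the switch to java.util.Map opens up. A sketch of the same accumulation under that formulation, not the commit's code; CollectionStatistics is Lucene's class, constructed with the same five arguments as in the diff:

import java.util.Map;

import org.apache.lucene.search.CollectionStatistics;

class FieldStatsMerge {
    // Assumes value is non-null, which the loop's null check already guarantees.
    static void accumulate(Map<String, CollectionStatistics> acc, String key, CollectionStatistics value) {
        acc.merge(key, value, (existing, incoming) -> new CollectionStatistics(
            key,
            existing.maxDoc() + incoming.maxDoc(),
            existing.docCount() + incoming.docCount(),
            existing.sumTotalTermFreq() + incoming.sumTotalTermFreq(),
            existing.sumDocFreq() + incoming.sumDocFreq()
        ));
    }
}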
server/src/main/java/org/elasticsearch/common/collect/HppcMaps.java (0 additions & 118 deletions)

This file was deleted.

server/src/main/java/org/elasticsearch/search/dfs/AggregatedDfs.java

@@ -8,28 +8,26 @@
 
 package org.elasticsearch.search.dfs;
 
-import com.carrotsearch.hppc.ObjectObjectHashMap;
-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.TermStatistics;
-import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 public class AggregatedDfs implements Writeable {
 
-    private ObjectObjectHashMap<Term, TermStatistics> termStatistics;
-    private ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics;
+    private Map<Term, TermStatistics> termStatistics;
+    private Map<String, CollectionStatistics> fieldStatistics;
     private long maxDoc;
 
     public AggregatedDfs(StreamInput in) throws IOException {
         int size = in.readVInt();
-        termStatistics = HppcMaps.newMap(size);
+        termStatistics = new HashMap<>(size);
         for (int i = 0; i < size; i++) {
             Term term = new Term(in.readString(), in.readBytesRef());
             TermStatistics stats = new TermStatistics(in.readBytesRef(), in.readVLong(), DfsSearchResult.subOne(in.readVLong()));
@@ -39,21 +37,17 @@ public AggregatedDfs(StreamInput in) throws IOException {
         maxDoc = in.readVLong();
     }
 
-    public AggregatedDfs(
-        ObjectObjectHashMap<Term, TermStatistics> termStatistics,
-        ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics,
-        long maxDoc
-    ) {
+    public AggregatedDfs(Map<Term, TermStatistics> termStatistics, Map<String, CollectionStatistics> fieldStatistics, long maxDoc) {
         this.termStatistics = termStatistics;
         this.fieldStatistics = fieldStatistics;
         this.maxDoc = maxDoc;
     }
 
-    public ObjectObjectHashMap<Term, TermStatistics> termStatistics() {
+    public Map<Term, TermStatistics> termStatistics() {
         return termStatistics;
     }
 
-    public ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics() {
+    public Map<String, CollectionStatistics> fieldStatistics() {
         return fieldStatistics;
     }
 
@@ -65,11 +59,11 @@ public long maxDoc() {
     public void writeTo(final StreamOutput out) throws IOException {
         out.writeVInt(termStatistics.size());
 
-        for (ObjectObjectCursor<Term, TermStatistics> c : termStatistics()) {
-            Term term = c.key;
+        for (var entry : termStatistics().entrySet()) {
+            Term term = entry.getKey();
             out.writeString(term.field());
             out.writeBytesRef(term.bytes());
-            TermStatistics stats = c.value;
+            TermStatistics stats = entry.getValue();
             out.writeBytesRef(stats.term());
             out.writeVLong(stats.docFreq());
             out.writeVLong(DfsSearchResult.addOne(stats.totalTermFreq()));
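
Two wire-format details in writeTo are easy to miss: the map is encoded as a count prefix followed by one record per entry, and totalTermFreq is shifted with DfsSearchResult.addOne on write (and subOne on read), presumably so a -1 "not available" value still fits the non-negative vlong encoding. A rough illustration of the count-prefixed pattern using plain java.io as a stand-in for StreamOutput (fixed-width writes here, where the real code uses vints, vlongs, and BytesRefs):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Map;

class SizePrefixedMapWrite {
    static byte[] write(Map<String, Long> docFreqs) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(docFreqs.size());       // like out.writeVInt(termStatistics.size())
        for (Map.Entry<String, Long> entry : docFreqs.entrySet()) {
            out.writeUTF(entry.getKey());    // like writeString / writeBytesRef
            out.writeLong(entry.getValue()); // like writeVLong(stats.docFreq())
        }
        return bytes.toByteArray();
    }
}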
server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java

@@ -8,15 +8,12 @@
 
 package org.elasticsearch.search.dfs;
 
-import com.carrotsearch.hppc.ObjectObjectHashMap;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.TermStatistics;
-import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.rescore.RescoreContext;
 import org.elasticsearch.tasks.TaskCancelledException;
@@ -33,7 +30,7 @@ public class DfsPhase {
 
     public void execute(SearchContext context) {
         try {
-            ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
+            Map<String, CollectionStatistics> fieldStatistics = new HashMap<>();
             Map<Term, TermStatistics> stats = new HashMap<>();
             IndexSearcher searcher = new IndexSearcher(context.searcher().getIndexReader()) {
                 @Override
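
For context, the anonymous IndexSearcher created here is how DfsPhase captures the statistics it later ships to the coordinating node: the subclass records every term and field statistic the query requests while it is being weighted. A sketch of that technique as a named class; the override signatures are taken from Lucene 9's IndexSearcher and are my assumption, not quoted from the commit:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermStatistics;

class StatsCollectingSearcher extends IndexSearcher {
    final Map<Term, TermStatistics> termStats = new HashMap<>();
    final Map<String, CollectionStatistics> fieldStats = new HashMap<>();

    StatsCollectingSearcher(IndexReader reader) {
        super(reader);
    }

    @Override
    public TermStatistics termStatistics(Term term, int docFreq, long totalTermFreq) throws IOException {
        TermStatistics ts = super.termStatistics(term, docFreq, totalTermFreq);
        if (ts != null) { // Lucene returns null when the term does not occur
            termStats.put(term, ts);
        }
        return ts;
    }

    @Override
    public CollectionStatistics collectionStatistics(String field) throws IOException {
        CollectionStatistics cs = super.collectionStatistics(field);
        if (cs != null) { // null when the field is absent from the index
            fieldStats.put(field, cs);
        }
        return cs;
    }
}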
server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java

@@ -8,15 +8,11 @@
 
 package org.elasticsearch.search.dfs;
 
-import com.carrotsearch.hppc.ObjectObjectHashMap;
-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.CollectionStatistics;
 import org.apache.lucene.search.TermStatistics;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
-import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.SearchPhaseResult;
@@ -25,14 +21,16 @@
 import org.elasticsearch.search.internal.ShardSearchRequest;
 
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 public class DfsSearchResult extends SearchPhaseResult {
 
     private static final Term[] EMPTY_TERMS = new Term[0];
     private static final TermStatistics[] EMPTY_TERM_STATS = new TermStatistics[0];
     private Term[] terms;
     private TermStatistics[] termStatistics;
-    private ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
+    private Map<String, CollectionStatistics> fieldStatistics = new HashMap<>();
     private int maxDoc;
 
     public DfsSearchResult(StreamInput in) throws IOException {
@@ -77,7 +75,7 @@ public DfsSearchResult termsStatistics(Term[] terms, TermStatistics[] termStatis
         return this;
     }
 
-    public DfsSearchResult fieldStatistics(ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics) {
+    public DfsSearchResult fieldStatistics(Map<String, CollectionStatistics> fieldStatistics) {
         this.fieldStatistics = fieldStatistics;
         return this;
     }
@@ -90,7 +88,7 @@ public TermStatistics[] termStatistics() {
         return termStatistics;
     }
 
-    public ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics() {
+    public Map<String, CollectionStatistics> fieldStatistics() {
         return fieldStatistics;
     }
 
@@ -110,13 +108,12 @@ public void writeTo(StreamOutput out) throws IOException {
         }
     }
 
-    public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics)
-        throws IOException {
+    public static void writeFieldStats(StreamOutput out, Map<String, CollectionStatistics> fieldStatistics) throws IOException {
         out.writeVInt(fieldStatistics.size());
 
-        for (ObjectObjectCursor<String, CollectionStatistics> c : fieldStatistics) {
-            out.writeString(c.key);
-            CollectionStatistics statistics = c.value;
+        for (var entry : fieldStatistics.entrySet()) {
+            out.writeString(entry.getKey());
+            CollectionStatistics statistics = entry.getValue();
             assert statistics.maxDoc() >= 0;
             out.writeVLong(statistics.maxDoc());
             // stats are always positive numbers
@@ -144,9 +141,9 @@ public static void writeSingleTermStats(StreamOutput out, TermStatistics termSta
         }
     }
 
-    static ObjectObjectHashMap<String, CollectionStatistics> readFieldStats(StreamInput in) throws IOException {
+    static Map<String, CollectionStatistics> readFieldStats(StreamInput in) throws IOException {
         final int numFieldStatistics = in.readVInt();
-        ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap(numFieldStatistics);
+        Map<String, CollectionStatistics> fieldStatistics = new HashMap<>(numFieldStatistics);
         for (int i = 0; i < numFieldStatistics; i++) {
             final String field = in.readString();
             assert field != null;
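
One caveat worth noting with new HashMap<>(numFieldStatistics) here (and new HashMap<>(size) in the AggregatedDfs constructor): the int argument is an initial table capacity, not an expected entry count, so with the default 0.75 load factor the table still resizes once the map grows past three quarters of it. If avoiding that resize matters, the usual pre-sizing idiom is (a generic sketch, not code from this commit):

import java.util.HashMap;

final class PreSizedMaps {
    // Capacity must exceed expectedSize / 0.75 so that expectedSize
    // insertions never trigger a rehash.
    static <K, V> HashMap<K, V> withExpectedSize(int expectedSize) {
        return new HashMap<>((int) (expectedSize / 0.75f) + 1);
    }
}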
