Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Added third highlighter type based on lucene postings highlighter
Requires the field's index_options to be set to "offsets" so that positions and offsets are stored in the postings list. Considerably faster than the plain highlighter since it doesn't require reanalyzing the text to be highlighted: the larger the documents, the better the performance gain should be. Requires less disk space than term_vectors, which are needed for the fast_vector_highlighter. Breaks the text into sentences and highlights them. Uses a BreakIterator to find sentences in the text. Plays really well with natural text, but not quite as well if the text contains html markup, for instance. Treats the document as the whole corpus, and scores individual sentences as if they were documents in this corpus, using the BM25 algorithm. Uses a forked version of the lucene postings highlighter to support: - per-value discrete highlighting for fields that have multiple values, needed when number_of_fragments=0 since we want to return a snippet per value - manually passing in query terms to avoid calling extract terms multiple times, since we use a different highlighter instance per doc/field, but the query is always the same. The lucene postings highlighter api is quite different compared to the existing highlighters api, the main difference being that it allows highlighting multiple fields in multiple docs with a single call, ensuring sequential IO. The way it is introduced in elasticsearch in this first round is a compromise that tries not to change the current highlight api, which works per document, per field. The main disadvantage is that we lose the sequential IO, but we can always refactor the highlight api to work with multiple documents. Supports pre_tag, post_tag, number_of_fragments (0 highlights the whole field), require_field_match, no_match_size, order by score and html encoding. Closes #3704
- Loading branch information
Showing
21 changed files
with
4,770 additions
and
115 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
78 changes: 78 additions & 0 deletions
78
src/main/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatter.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,78 @@ | ||
/* | ||
* Licensed to ElasticSearch and Shay Banon under one | ||
* or more contributor license agreements. See the NOTICE file | ||
* distributed with this work for additional information | ||
* regarding copyright ownership. ElasticSearch licenses this | ||
* file to you under the Apache License, Version 2.0 (the | ||
* "License"); you may not use this file except in compliance | ||
* with the License. You may obtain a copy of the License at | ||
* | ||
* http://www.apache.org/licenses/LICENSE-2.0 | ||
* | ||
* Unless required by applicable law or agreed to in writing, software | ||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT | ||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the | ||
* License for the specific language governing permissions and limitations under | ||
* the License. | ||
*/ | ||
|
||
package org.apache.lucene.search.postingshighlight; | ||
|
||
import org.apache.lucene.search.highlight.Encoder; | ||
import org.elasticsearch.search.highlight.HighlightUtils; | ||
|
||
/** | ||
Custom passage formatter that allows us to: | ||
1) extract different snippets (instead of a single big string) together with their scores ({@link Snippet}) | ||
2) use the {@link Encoder} implementations that are already used with the other highlighters | ||
*/ | ||
public class CustomPassageFormatter extends XPassageFormatter { | ||
|
||
private final String preTag; | ||
private final String postTag; | ||
private final Encoder encoder; | ||
|
||
public CustomPassageFormatter(String preTag, String postTag, Encoder encoder) { | ||
this.preTag = preTag; | ||
this.postTag = postTag; | ||
this.encoder = encoder; | ||
} | ||
|
||
@Override | ||
public Snippet[] format(Passage[] passages, String content) { | ||
Snippet[] snippets = new Snippet[passages.length]; | ||
int pos; | ||
for (int j = 0; j < passages.length; j++) { | ||
Passage passage = passages[j]; | ||
StringBuilder sb = new StringBuilder(); | ||
pos = passage.startOffset; | ||
for (int i = 0; i < passage.numMatches; i++) { | ||
int start = passage.matchStarts[i]; | ||
int end = passage.matchEnds[i]; | ||
// its possible to have overlapping terms | ||
if (start > pos) { | ||
append(sb, content, pos, start); | ||
} | ||
if (end > pos) { | ||
sb.append(preTag); | ||
append(sb, content, Math.max(pos, start), end); | ||
sb.append(postTag); | ||
pos = end; | ||
} | ||
} | ||
// its possible a "term" from the analyzer could span a sentence boundary. | ||
append(sb, content, pos, Math.max(pos, passage.endOffset)); | ||
//we remove the paragraph separator if present at the end of the snippet (we used it as separator between values) | ||
if (sb.charAt(sb.length() - 1) == HighlightUtils.PARAGRAPH_SEPARATOR) { | ||
sb.deleteCharAt(sb.length() - 1); | ||
} | ||
//and we trim the snippets too | ||
snippets[j] = new Snippet(sb.toString().trim(), passage.score, passage.numMatches > 0); | ||
} | ||
return snippets; | ||
} | ||
|
||
protected void append(StringBuilder dest, String content, int start, int end) { | ||
dest.append(encoder.encodeText(content.substring(start, end))); | ||
} | ||
} |
Oops, something went wrong.