@@ -41,7 +41,6 @@
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.tasks.RemovedTaskListener;
 import org.elasticsearch.tasks.Task;
@@ -82,6 +81,8 @@
 import static java.util.Collections.singleton;
 import static org.elasticsearch.action.admin.cluster.node.tasks.TestTaskPlugin.TEST_TASK_ACTION;
 import static org.elasticsearch.action.admin.cluster.node.tasks.TestTaskPlugin.UNBLOCK_TASK_ACTION;
+import static org.elasticsearch.action.search.SearchQueryThenFetchAsyncAction.NODE_SEARCH_ACTION_NAME;
+import static org.elasticsearch.action.search.SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME;
 import static org.elasticsearch.core.TimeValue.timeValueMillis;
 import static org.elasticsearch.core.TimeValue.timeValueSeconds;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
@@ -353,8 +354,6 @@ public void testTransportBulkTasks() {
     }
 
     public void testSearchTaskDescriptions() {
-        // TODO: enhance this test to also check the tasks created by batched query execution
-        updateClusterSettings(Settings.builder().put(SearchService.BATCHED_QUERY_PHASE.getKey(), false));
         registerTaskManagerListeners(TransportSearchAction.TYPE.name()); // main task
         registerTaskManagerListeners(TransportSearchAction.TYPE.name() + "[*]"); // shard task
         createIndex("test");
@@ -380,6 +379,11 @@ public void testSearchTaskDescriptions() {
         // check that if we have any shard-level requests they all have non-zero length description
         List<TaskInfo> shardTasks = findEvents(TransportSearchAction.TYPE.name() + "[*]", Tuple::v1);
         for (TaskInfo taskInfo : shardTasks) {
+            // During batched query execution, if a partial reduction was done on the data node, a task will be created to free the reader.
+            // These tasks don't have descriptions or parent tasks, so they're ignored for this test.
+            if (taskInfo.action().equals(FREE_CONTEXT_SCROLL_ACTION_NAME)) {
+                continue;
+            }
             assertThat(taskInfo.parentTaskId(), notNullValue());
             assertEquals(mainTask.get(0).taskId(), taskInfo.parentTaskId());
             assertTaskHeaders(taskInfo);
@@ -396,12 +400,12 @@ public void testSearchTaskDescriptions() {
                     taskInfo.description(),
                     Regex.simpleMatch("id[*], size[1], lastEmittedDoc[null]", taskInfo.description())
                 );
+                case NODE_SEARCH_ACTION_NAME -> assertEquals("NodeQueryRequest", taskInfo.description());
                 default -> fail("Unexpected action [" + taskInfo.action() + "] with description [" + taskInfo.description() + "]");
             }
             // assert that all task descriptions have non-zero length
             assertThat(taskInfo.description().length(), greaterThan(0));
         }
-        updateClusterSettings(Settings.builder().putNull(SearchService.BATCHED_QUERY_PHASE.getKey()));
     }
 
     public void testSearchTaskHeaderLimit() {
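
As context for the guard added above: the test now drops free-context tasks before asserting on descriptions and parent task ids. A minimal sketch of the same filtering as a standalone helper, using only the TaskInfo accessors the test already uses; the class and method names here are hypothetical and not part of this change:

import static org.elasticsearch.action.search.SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME;

import java.util.List;

import org.elasticsearch.tasks.TaskInfo;

// Hypothetical helper, not part of this PR: mirrors the `continue` added in
// testSearchTaskDescriptions by dropping free-context tasks, which carry no
// description or parent task, before any assertions run.
final class ShardTaskFilters {
    static List<TaskInfo> describableShardTasks(List<TaskInfo> shardTasks) {
        return shardTasks.stream()
            .filter(taskInfo -> FREE_CONTEXT_SCROLL_ACTION_NAME.equals(taskInfo.action()) == false)
            .toList();
    }
}

The PR keeps the check inline instead, which keeps the skip visible next to the assertions it protects.
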
@@ -552,7 +552,7 @@ private void onNodeQueryFailure(Exception e, NodeQueryRequest request, CanMatchP
         }
     }
 
-    private static final String NODE_SEARCH_ACTION_NAME = "indices:data/read/search[query][n]";
+    public static final String NODE_SEARCH_ACTION_NAME = "indices:data/read/search[query][n]";
 
     static void registerNodeSearchAction(
         SearchTransportService searchTransportService,
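
The hunk above widens NODE_SEARCH_ACTION_NAME from private to public, which is what allows the static import added to the test. A rough sketch of that kind of use, assuming the standard ESIntegTestCase list-tasks client builder; the test class, its setup, and the timing of the check are illustrative only and not taken from this PR:

import static org.elasticsearch.action.search.SearchQueryThenFetchAsyncAction.NODE_SEARCH_ACTION_NAME;

import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.test.ESIntegTestCase;

// Illustrative only, not part of this PR: a hypothetical integration test that looks up
// in-flight node-level query tasks by the now-public action name.
public class NodeSearchActionNameIT extends ESIntegTestCase {
    public void testNodeQueryTaskDescription() {
        ListTasksResponse tasks = clusterAdmin().prepareListTasks()
            .setActions(NODE_SEARCH_ACTION_NAME)
            .setDetailed(true)
            .get();
        for (TaskInfo taskInfo : tasks.getTasks()) {
            // Matches the description asserted in testSearchTaskDescriptions above.
            assertEquals("NodeQueryRequest", taskInfo.description());
        }
    }
}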