Skip to content

Commit

Permalink
Track hot threads as bytes not strings (#103812)
Browse files Browse the repository at this point in the history
No need to allocate a single enormous string for each node-level
response, we can leave them as bytes and process them in a streaming
fashion as needed.
  • Loading branch information
DaveCTurner committed Jan 8, 2024
1 parent 93baa26 commit 205df2a
Show file tree
Hide file tree
Showing 10 changed files with 220 additions and 83 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,15 @@
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.ESNetty4IntegTestCase;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportLogger;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

@ESIntegTestCase.ClusterScope(numDataNodes = 2, scope = ESIntegTestCase.Scope.TEST)
public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
Expand Down Expand Up @@ -54,7 +52,7 @@ public void testLoggingHandler() {
+ ", request id: \\d+"
+ ", type: request"
+ ", version: .*"
+ ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]"
+ ", action: cluster:monitor/nodes/stats\\[n\\]\\]"
+ " WRITE: \\d+B";
final MockLogAppender.LoggingExpectation writeExpectation = new MockLogAppender.PatternSeenEventExpectation(
"hot threads request",
Expand All @@ -74,7 +72,7 @@ public void testLoggingHandler() {
+ ", request id: \\d+"
+ ", type: request"
+ ", version: .*"
+ ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]"
+ ", action: cluster:monitor/nodes/stats\\[n\\]\\]"
+ " READ: \\d+B";

final MockLogAppender.LoggingExpectation readExpectation = new MockLogAppender.PatternSeenEventExpectation(
Expand All @@ -87,7 +85,7 @@ public void testLoggingHandler() {
appender.addExpectation(writeExpectation);
appender.addExpectation(flushExpectation);
appender.addExpectation(readExpectation);
client().execute(TransportNodesHotThreadsAction.TYPE, new NodesHotThreadsRequest()).actionGet(10, TimeUnit.SECONDS);
client().admin().cluster().prepareNodesStats().get(TimeValue.timeValueSeconds(10));
appender.assertAllExpectationsMatched();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.common.ReferenceDocs;
import org.elasticsearch.common.logging.ChunkedLoggingStreamTests;
import org.elasticsearch.core.TimeValue;
Expand All @@ -24,7 +25,6 @@

import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
Expand Down Expand Up @@ -116,61 +116,72 @@ public void onFailure(Exception e) {
public void testIgnoreIdleThreads() {
    // Verifies that ignoreIdleThreads=true (the default) filters out known-idle
    // stacks such as CachedTimeThread, yielding a strictly smaller report.
    assumeTrue("no support for hot_threads on FreeBSD", Constants.FREE_BSD == false);

    final Matcher<String> containsCachedTimeThreadRunMethod = containsString(
        "org.elasticsearch.threadpool.ThreadPool$CachedTimeThread.run"
    );

    // First time, don't ignore idle threads: every node's report must mention the
    // idle CachedTimeThread. Accumulate the total report length across nodes.
    final var totSizeAll = safeAwait(
        SubscribableListener.<Integer>newForked(
            l -> client().execute(
                TransportNodesHotThreadsAction.TYPE,
                new NodesHotThreadsRequest().ignoreIdleThreads(false).threads(Integer.MAX_VALUE),
                l.map(response -> {
                    int length = 0;
                    for (NodeHotThreads node : response.getNodesMap().values()) {
                        length += node.getHotThreads().length();
                        assertThat(node.getHotThreads(), containsCachedTimeThreadRunMethod);
                    }
                    return length;
                })
            )
        )
    );

    // Second time, do ignore idle threads:
    final var request = new NodesHotThreadsRequest().threads(Integer.MAX_VALUE);
    // Make sure default is true:
    assertTrue(request.ignoreIdleThreads());

    final var totSizeIgnoreIdle = safeAwait(
        SubscribableListener.<Integer>newForked(l -> client().execute(TransportNodesHotThreadsAction.TYPE, request, l.map(response -> {
            int length = 0;
            for (NodeHotThreads node : response.getNodesMap().values()) {
                length += node.getHotThreads().length();
                // Idle threads filtered out, so the CachedTimeThread must not appear.
                assertThat(node.getHotThreads(), not(containsCachedTimeThreadRunMethod));
            }
            return length;
        })))
    );

    // The filtered stacks should be smaller than unfiltered ones:
    assertThat(totSizeIgnoreIdle, lessThan(totSizeAll));
}

public void testTimestampAndParams() {
    // Checks that the report header reflects the request defaults
    // (interval=500ms, busiestThreads=3, ignoreIdleThreads=true), or the
    // "not supported" message on FreeBSD where hot_threads is unavailable.
    safeAwait(
        SubscribableListener.<Void>newForked(
            l -> client().execute(TransportNodesHotThreadsAction.TYPE, new NodesHotThreadsRequest(), l.map(response -> {
                if (Constants.FREE_BSD) {
                    for (NodeHotThreads node : response.getNodesMap().values()) {
                        assertThat(node.getHotThreads(), containsString("hot_threads is not supported"));
                    }
                } else {
                    for (NodeHotThreads node : response.getNodesMap().values()) {
                        assertThat(
                            node.getHotThreads(),
                            allOf(
                                containsString("Hot threads at"),
                                containsString("interval=500ms"),
                                containsString("busiestThreads=3"),
                                containsString("ignoreIdleThreads=true")
                            )
                        );
                    }
                }
                // Listener carries no payload; assertions above are the test.
                return null;
            }))
        )
    );
}

@TestLogging(reason = "testing logging at various levels", value = "org.elasticsearch.action.admin.HotThreadsIT:TRACE")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,7 @@ static TransportVersion def(int id) {
public static final TransportVersion UPDATE_API_KEY_EXPIRATION_TIME_ADDED = def(8_568_00_0);
public static final TransportVersion LAZY_ROLLOVER_ADDED = def(8_569_00_0);
public static final TransportVersion ESQL_PLAN_POINT_LITERAL_WKB = def(8_570_00_0);
public static final TransportVersion HOT_THREADS_AS_BYTES = def(8_571_00_0);

/*
* STOP! READ THIS FIRST! No, really,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,34 +8,78 @@

package org.elasticsearch.action.admin.cluster.node.hotthreads;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.nio.charset.StandardCharsets;

public class NodeHotThreads extends BaseNodeResponse {

private String hotThreads;
private final ReleasableBytesReference bytes;

NodeHotThreads(StreamInput in) throws IOException {
super(in);
hotThreads = in.readString();
if (in.getTransportVersion().onOrAfter(TransportVersions.HOT_THREADS_AS_BYTES)) {
bytes = in.readReleasableBytesReference();
} else {
bytes = ReleasableBytesReference.wrap(new BytesArray(in.readString().getBytes(StandardCharsets.UTF_8)));
}
}

public NodeHotThreads(DiscoveryNode node, String hotThreads) {
public NodeHotThreads(DiscoveryNode node, ReleasableBytesReference hotThreadsUtf8Bytes) {
super(node);
this.hotThreads = hotThreads;
assert hotThreadsUtf8Bytes.hasReferences();
bytes = hotThreadsUtf8Bytes; // takes ownership of the original ref, no need to .retain()
}

public String getHotThreads() {
return this.hotThreads;
return bytes.utf8ToString();
}

public java.io.Reader getHotThreadsReader() {
try {
return new InputStreamReader(bytes.streamInput(), StandardCharsets.UTF_8);
} catch (IOException e) {
assert false : e; // all in-memory, no IO takes place
return new StringReader("ERROR:" + e.toString());
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(hotThreads);
if (out.getTransportVersion().onOrAfter(TransportVersions.HOT_THREADS_AS_BYTES)) {
out.writeBytesReference(bytes);
} else {
out.writeString(bytes.utf8ToString());
}
}

@Override
public void incRef() {
bytes.incRef();
}

@Override
public boolean tryIncRef() {
return bytes.tryIncRef();
}

@Override
public boolean decRef() {
return bytes.decRef();
}

@Override
public boolean hasReferences() {
return bytes.hasReferences();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -15,36 +15,51 @@
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.transport.LeakTracker;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;

public class NodesHotThreadsResponse extends BaseNodesResponse<NodeHotThreads> {

private final RefCounted refs = LeakTracker.wrap(
AbstractRefCounted.of(() -> Releasables.wrap(Iterators.map(getNodes().iterator(), n -> n::decRef)).close())
);

public NodesHotThreadsResponse(ClusterName clusterName, List<NodeHotThreads> nodes, List<FailedNodeException> failures) {
super(clusterName, nodes, failures);
for (NodeHotThreads nodeHotThreads : getNodes()) {
nodeHotThreads.mustIncRef();
}
}

public Iterator<CheckedConsumer<java.io.Writer, IOException>> getTextChunks() {
return Iterators.flatMap(
getNodes().iterator(),
node -> Iterators.concat(
Iterators.single(writer -> writer.append("::: ").append(node.getNode().toString()).append('\n')),
Iterators.map(new LinesIterator(node.getHotThreads()), line -> writer -> writer.append(" ").append(line).append('\n')),
Iterators.single(writer -> writer.append('\n'))
Iterators.map(
new LinesIterator(node.getHotThreadsReader()),
line -> writer -> writer.append(" ").append(line).append('\n')
),
Iterators.single(writer -> {
assert hasReferences();
writer.append('\n');
})
)
);
}

@Override
protected List<NodeHotThreads> readNodesFrom(StreamInput in) throws IOException {
return in.readCollectionAsList(NodeHotThreads::new);
return TransportAction.localOnly();
}

@Override
Expand All @@ -56,8 +71,8 @@ private static class LinesIterator implements Iterator<String> {
final BufferedReader reader;
String nextLine;

private LinesIterator(String input) {
reader = new BufferedReader(new StringReader(Objects.requireNonNull(input)));
private LinesIterator(java.io.Reader reader) {
this.reader = new BufferedReader(reader);
advance();
}

Expand Down Expand Up @@ -86,4 +101,24 @@ public String next() {
}
}
}

@Override
public void incRef() {
refs.incRef();
}

@Override
public boolean tryIncRef() {
return refs.tryIncRef();
}

@Override
public boolean decRef() {
return refs.decRef();
}

@Override
public boolean hasReferences() {
return refs.hasReferences();
}
}

0 comments on commit 205df2a

Please sign in to comment.