diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile index 672061ea09ad9..8cc03dff69d5f 100644 --- a/dev-support/Jenkinsfile +++ b/dev-support/Jenkinsfile @@ -55,7 +55,7 @@ pipeline { environment { YETUS='yetus' // Branch or tag name. Yetus release tags are 'rel/X.Y.Z' - YETUS_VERSION='rel/0.14.0' + YETUS_VERSION='a7d29a6a72750a0c5c39512f33945e773e69303e' } parameters { @@ -71,7 +71,7 @@ pipeline { checkout([ $class: 'GitSCM', branches: [[name: "${env.YETUS_VERSION}"]], - userRemoteConfigs: [[ url: 'https://github.com/apache/yetus.git']]] + userRemoteConfigs: [[ url: 'https://github.com/ayushtkn/yetus.git']]] ) } } diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SubjectUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SubjectUtil.java index e364f04059620..7517fd033cc56 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SubjectUtil.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SubjectUtil.java @@ -61,6 +61,9 @@ public final class SubjectUtil { private static final int JAVA_SPEC_VER = Math.max(8, Integer.parseInt(System.getProperty("java.specification.version").split("\\.")[0])); + /** + * True if the current JVM copies the current JAAS subject into new threads automatically. 
+ */ public static final boolean THREAD_INHERITS_SUBJECT = checkThreadInheritsSubject(); /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java index 1c451ca6d30b9..0ca13ad0d79e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java @@ -22,6 +22,7 @@ import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -105,15 +106,16 @@ public Collection getChangedProperties( /** * A background thread to apply configuration changes. 
*/ - private static class ReconfigurationThread extends Thread { + private static class ReconfigurationThread extends SubjectInheritingThread { private ReconfigurableBase parent; ReconfigurationThread(ReconfigurableBase base) { + super(); this.parent = base; } // See {@link ReconfigurationServlet#applyChanges} - public void run() { + public void work() { LOG.info("Starting reconfiguration task."); final Configuration oldConf = parent.getConf(); final Configuration newConf = parent.getNewConf(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java index d7b61346d4e3b..43e7121dc26d2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -107,7 +108,7 @@ void init() { */ private void initRefreshThread(boolean runImmediately) { if (refreshInterval > 0) { - refreshUsed = new Thread(new RefreshThread(this, runImmediately), + refreshUsed = new SubjectInheritingThread(new RefreshThread(this, runImmediately), "refreshUsed-" + dirPath); refreshUsed.setDaemon(true); refreshUsed.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java index 794855508c63f..0892db697d7a8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +39,7 @@ */ @InterfaceAudience.Private public class DelegationTokenRenewer - extends Thread { + extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory .getLogger(DelegationTokenRenewer.class); @@ -263,7 +264,7 @@ public void removeRenewAction( } @Override - public void run() { + public void work() { for(;;) { RenewAction action = null; try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 930abf0b5d172..957cac07d972c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -81,6 +81,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.tracing.TraceScope; import org.apache.hadoop.util.Preconditions; @@ -4087,7 +4088,7 @@ private interface StatisticsAggregator { static { STATS_DATA_REF_QUEUE = new ReferenceQueue<>(); // start a single daemon cleaner thread - STATS_DATA_CLEANER = new Thread(new StatisticsDataReferenceCleaner()); + STATS_DATA_CLEANER = new SubjectInheritingThread(new StatisticsDataReferenceCleaner()); STATS_DATA_CLEANER. 
setName(StatisticsDataReferenceCleaner.class.getName()); STATS_DATA_CLEANER.setDaemon(true); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java index 12a24fd079e62..86ec95c14e308 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ha; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import java.io.BufferedReader; @@ -50,7 +51,7 @@ enum StreamType { this.stream = stream; this.type = type; - thread = new Thread(new Runnable() { + thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 5caf27edcdaf2..b036caedfa3e1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -54,6 +54,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGet; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tracing.Span; import org.apache.hadoop.tracing.Tracer; import org.slf4j.Logger; @@ -407,7 +408,7 @@ public synchronized void setRpcResponse(Writable rpcResponse) { /** Thread that reads responses and notifies callers. Each connection owns a * socket connected to a remote address. Calls are multiplexed through this * socket: responses may be delivered out of order. 
*/ - private class Connection extends Thread { + private class Connection extends SubjectInheritingThread { private InetSocketAddress server; // server ip:port private final ConnectionId remoteId; // connection id private AuthMethod authMethod; // authentication method @@ -448,7 +449,7 @@ private class Connection extends Thread { Consumer removeMethod) { this.remoteId = remoteId; this.server = remoteId.getAddress(); - this.rpcRequestThread = new Thread(new RpcRequestSender(), + this.rpcRequestThread = new SubjectInheritingThread(new RpcRequestSender(), "IPC Parameter Sending Thread for " + remoteId); this.rpcRequestThread.setDaemon(true); @@ -1126,7 +1127,7 @@ private synchronized void sendPing() throws IOException { } @Override - public void run() { + public void work() { try { // Don't start the ipc parameter sending thread until we start this // thread, because the shutdown logic only gets triggered if this diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index f0ed1d4a97f7f..3d23588ff64a1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -124,6 +124,8 @@ import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; + import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.tracing.Span; import org.apache.hadoop.tracing.SpanContext; @@ -1471,7 +1473,7 @@ public String toString() { } /** Listens on the socket. 
Creates jobs for the handler threads*/ - private class Listener extends Thread { + private class Listener extends SubjectInheritingThread { private ServerSocketChannel acceptChannel = null; //the accept channel private Selector selector = null; //the selector that we use for the server @@ -1520,7 +1522,7 @@ void setIsAuxiliary() { this.isOnAuxiliaryPort = true; } - private class Reader extends Thread { + private class Reader extends SubjectInheritingThread { final private BlockingQueue pendingConnections; private final Selector readSelector; @@ -1533,7 +1535,7 @@ private class Reader extends Thread { } @Override - public void run() { + public void work() { LOG.info("Starting " + Thread.currentThread().getName()); try { doRunLoop(); @@ -1612,7 +1614,7 @@ void shutdown() { } @Override - public void run() { + public void work() { LOG.info(Thread.currentThread().getName() + ": starting"); SERVER.set(Server.this); connectionManager.startIdleScan(); @@ -1760,7 +1762,7 @@ Reader getReader() { } // Sends responses of RPC back to clients. - private class Responder extends Thread { + private class Responder extends SubjectInheritingThread { private final Selector writeSelector; private int pending; // connections waiting to register @@ -1772,7 +1774,7 @@ private class Responder extends Thread { } @Override - public void run() { + public void work() { LOG.info(Thread.currentThread().getName() + ": starting"); SERVER.set(Server.this); try { @@ -3219,7 +3221,7 @@ private void internalQueueCall(Call call, boolean blocking) } /** Handles queued calls . 
*/ - private class Handler extends Thread { + private class Handler extends SubjectInheritingThread { public Handler(int instanceNumber) { this.setDaemon(true); this.setName("IPC Server handler "+ instanceNumber + @@ -3227,7 +3229,7 @@ public Handler(int instanceNumber) { } @Override - public void run() { + public void work() { LOG.debug("{}: starting", Thread.currentThread().getName()); SERVER.set(Server.this); while (running) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index c8843f2812e57..ae2c890fa1c89 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsSink; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +49,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { private final MetricsSink sink; private final MetricsFilter sourceFilter, recordFilter, metricFilter; private final SinkQueue queue; - private final Thread sinkThread; + private final SubjectInheritingThread sinkThread; private volatile boolean stopping = false; private volatile boolean inError = false; private final int periodMs, firstRetryDelay, retryCount; @@ -84,8 +85,8 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { "Dropped updates per sink", 0); qsize = registry.newGauge("Sink_"+ name + "Qsize", "Queue size", 0); - sinkThread = new Thread() { - @Override public void run() { + sinkThread = new SubjectInheritingThread() { + @Override public void work() { publishMetricsFromQueue(); } }; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index 5c8a3357a3ee6..9cfd5e0c1d52a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -36,6 +36,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; @@ -440,7 +441,7 @@ private void sendCallbackAndRemove(String caller, } @VisibleForTesting - final Thread watcherThread = new Thread(new Runnable() { + final Thread watcherThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { if (LOG.isDebugEnabled()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index b6be569026fd7..2f0933782d8fc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -89,6 +89,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -930,7 +931,7 @@ private void executeAutoRenewalTask(final String userName, new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread t 
= new Thread(r); + Thread t = new SubjectInheritingThread(r); t.setDaemon(true); t.setName("TGT Renewer for " + userName); return t; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 9cf3ccdd445e7..194042948bdf1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -60,7 +60,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.functional.InvocationRaisingIOE; import org.slf4j.Logger; @@ -912,12 +912,12 @@ public boolean isRunning() { return running; } - private class ExpiredTokenRemover extends Thread { + private class ExpiredTokenRemover extends SubjectInheritingThread { private long lastMasterKeyUpdate; private long lastTokenCacheCleanup; @Override - public void run() { + public void work() { LOG.info("Starting expired delegation token remover thread, " + "tokenRemoverScanInterval=" + tokenRemoverScanInterval / (60 * 1000) + " min(s)"); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java index 4d43c3a106f5e..9400f9590b2c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java @@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,7 +117,7 @@ public void interrupted(IrqHandler.InterruptData interruptData) { //start an async shutdown thread with a timeout ServiceForcedShutdown shutdown = new ServiceForcedShutdown(service, shutdownTimeMillis); - Thread thread = new Thread(shutdown); + Thread thread = new SubjectInheritingThread(shutdown); thread.setDaemon(true); thread.setName("Service Forced Shutdown"); thread.start(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java index a3bf4faf0a980..6c9157a8a7002 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AsyncDiskService.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,7 +75,7 @@ public AsyncDiskService(String[] volumes) { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - return new Thread(threadGroup, r); + return new SubjectInheritingThread(threadGroup, r); } }; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java index 5c90e4bd2d601..c7b249444bc00 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/BlockingThreadPoolExecutorService.java @@ -29,6 +29,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * This ExecutorService blocks the submission of new tasks when its queue is @@ -71,7 +72,7 @@ static ThreadFactory getNamedThreadFactory(final String prefix) { public Thread newThread(Runnable r) { final String name = prefix + "-pool" + poolNum + "-t" + threadNumber.getAndIncrement(); - return new Thread(group, r, name); + return new SubjectInheritingThread(group, r, name); } }; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java index 95d0d4d290ccd..b27fed0fa659f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java @@ -23,13 +23,15 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; + /** * This class monitors the percentage of time the JVM is paused in GC within * the specified observation window, say 1 minute. The user can provide a * hook which will be called whenever this percentage exceeds the specified * threshold. 
*/ -public class GcTimeMonitor extends Thread { +public class GcTimeMonitor extends SubjectInheritingThread { private final long maxGcTimePercentage; private final long observationWindowMs, sleepIntervalMs; @@ -151,7 +153,7 @@ public GcTimeMonitor(long observationWindowMs, long sleepIntervalMs, } @Override - public void run() { + public void work() { startTime = System.currentTimeMillis(); curData.timestamp = startTime; gcDataBuf[startIdx].setValues(startTime, 0); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index c53ddc0725ca2..775f3b34edb16 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -38,6 +38,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1020,9 +1021,9 @@ private void runCommand() throws IOException { // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new Thread() { + Thread errThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java index e85f850514b16..07d8fb07ac1a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -84,9 +85,9 @@ public final class ShutdownHookManager { static { try { Runtime.getRuntime().addShutdownHook( - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { if (MGR.shutdownInProgress.getAndSet(true)) { LOG.info("Shutdown process invoked a second time: ignoring"); return; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java index e9b6745340d45..afb4c5bb36ef2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/SubjectInheritingThread.java @@ -40,8 +40,8 @@ * Thread. *

* {@link #run()} cannot be directly overridden, as that would also override the - * subject restoration logic. SubjectInheritingThread provides a {@link work()} - * method instead, which is wrapped and invoked by its own final {@link run()} + * subject restoration logic. SubjectInheritingThread provides a {@link #work()} + * method instead, which is wrapped and invoked by its own final {@link #run()} * method. */ public class SubjectInheritingThread extends Thread { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 6bb7c2bd1d3f9..56ec25ebd8da8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -81,6 +81,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.util.PlatformName.IBM_JAVA; @@ -2478,7 +2479,7 @@ public void testConcurrentAccesses() throws Exception { Configuration conf = new Configuration(); conf.addResource(fileResource); - class ConfigModifyThread extends Thread { + class ConfigModifyThread extends SubjectInheritingThread { final private Configuration config; final private String prefix; @@ -2488,7 +2489,7 @@ public ConfigModifyThread(Configuration conf, String prefix) { } @Override - public void run() { + public void work() { for (int i = 0; i < 10000; i++) { config.set("some.config.value-" + prefix + i, "value"); } @@ -2740,7 +2741,7 @@ private static Configuration checkCDATA(byte[] bytes) { @Test public void testConcurrentModificationDuringIteration() throws InterruptedException { Configuration 
configuration = new Configuration(); - new Thread(() -> { + new SubjectInheritingThread(() -> { while (true) { configuration.set(String.valueOf(Math.random()), String.valueOf(Math.random())); } @@ -2748,7 +2749,7 @@ public void testConcurrentModificationDuringIteration() throws InterruptedExcept AtomicBoolean exceptionOccurred = new AtomicBoolean(false); - new Thread(() -> { + new SubjectInheritingThread(() -> { while (true) { try { configuration.iterator(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java index c475f7c826bb3..3f089a59d0f20 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestReconfiguration.java @@ -22,6 +22,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -284,7 +285,7 @@ public void testReconfigure() { public void testThread() throws ReconfigurationException { ReconfigurableDummy dummy = new ReconfigurableDummy(conf1); assertTrue(dummy.getConf().get(PROP1).equals(VAL1)); - Thread dummyThread = new Thread(dummy); + Thread dummyThread = new SubjectInheritingThread(dummy); dummyThread.start(); try { Thread.sleep(500); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java index 80edbeab4c8f8..aa50f0d9f49ef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FCStatisticsBaseTest.java @@ -40,6 +40,7 @@ import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,9 +74,9 @@ public void testStatisticsOperations() throws Exception { stats.incrementWriteOps(123); assertEquals(123, stats.getWriteOps()); - Thread thread = new Thread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { stats.incrementWriteOps(1); } }; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java index 119bad41a3028..491387efa250c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java @@ -36,7 +36,7 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; - +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_CREATION_PARALLEL_COUNT; @@ -125,9 +125,9 @@ public void initialize(URI uri, Configuration conf) throws IOException { @Test public void testCacheEnabledWithInitializeForeverFS() throws Exception { final Configuration conf = new Configuration(); - Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { 
conf.set("fs.localfs1.impl", "org.apache.hadoop.fs." + "TestFileSystemCaching$InitializeForeverFileSystem"); try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index 89d7419f763d2..a1241a384d49a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -49,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * This class tests commands from Trash. @@ -724,7 +725,7 @@ public void testTrashEmptier() throws Exception { // Start Emptier in background Runnable emptier = trash.getEmptier(); - Thread emptierThread = new Thread(emptier); + Thread emptierThread = new SubjectInheritingThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -792,7 +793,7 @@ public void testTrashEmptierCleanDirNotInCheckpointDir() throws Exception { // Start Emptier in background. 
Runnable emptier = trash.getEmptier(); - Thread emptierThread = new Thread(emptier); + Thread emptierThread = new SubjectInheritingThread(emptier); emptierThread.start(); FsShell shell = new FsShell(); @@ -1049,7 +1050,7 @@ private void verifyAuditableTrashEmptier(Trash trash, Thread emptierThread = null; try { Runnable emptier = trash.getEmptier(); - emptierThread = new Thread(emptier); + emptierThread = new SubjectInheritingThread(emptier); emptierThread.start(); // Shutdown the emptier thread after a given time diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index d52abbc2a99bd..75a28b59767f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -46,6 +46,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; @@ -215,7 +216,7 @@ public LoadGenerator(Configuration conf) throws IOException, UnknownHostExceptio * A thread runs for the specified elapsed time if the time isn't zero. * Otherwise, it runs forever. */ - private class DFSClientThread extends Thread { + private class DFSClientThread extends SubjectInheritingThread { private int id; private long [] executionTime = new long[TOTAL_OP_TYPES]; private long [] totalNumOfOps = new long[TOTAL_OP_TYPES]; @@ -230,7 +231,7 @@ private DFSClientThread(int id) { * Each iteration decides what's the next operation and then pauses. 
*/ @Override - public void run() { + public void work() { try { while (shouldRun) { nextOp(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java index 99a8fc71898df..40d75c38d4de9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java @@ -18,6 +18,7 @@ package org.apache.hadoop.io; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -93,9 +94,9 @@ public void testMD5Hash() throws Exception { assertTrue(closeHash1.hashCode() != closeHash2.hashCode(), "hash collision"); - Thread t1 = new Thread() { + SubjectInheritingThread t1 = new SubjectInheritingThread() { @Override - public void run() { + public void work() { for (int i = 0; i < 100; i++) { MD5Hash hash = new MD5Hash(DFF); assertEquals(hash, md5HashFF); @@ -103,9 +104,9 @@ public void run() { } }; - Thread t2 = new Thread() { + SubjectInheritingThread t2 = new SubjectInheritingThread() { @Override - public void run() { + public void work() { for (int i = 0; i < 100; i++) { MD5Hash hash = new MD5Hash(D00); assertEquals(hash, md5Hash00); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index 95c4abe5e3907..7ca61a6358ce7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -27,6 +27,7 @@ import org.apache.hadoop.constants.ConfigConstants; import 
org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; @@ -300,13 +301,13 @@ public void testTextText() throws CharacterCodingException { assertEquals(8, a.copyBytes().length); } - private class ConcurrentEncodeDecodeThread extends Thread { + private class ConcurrentEncodeDecodeThread extends SubjectInheritingThread { public ConcurrentEncodeDecodeThread(String name) { super(name); } @Override - public void run() { + public void work() { final String name = this.getName(); DataOutputBuffer out = new DataOutputBuffer(); DataInputBuffer in = new DataInputBuffer(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 7d4f24efd52ce..9bccb52a6048c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -56,6 +56,8 @@ import org.apache.hadoop.test.StatUtils; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; + import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.*; import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat.*; import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; @@ -135,9 +137,9 @@ public void testMultiThreadedFstat() throws Exception { new AtomicReference(); List statters = new ArrayList(); for (int i = 0; i < 10; i++) { - Thread statter = new Thread() { + SubjectInheritingThread statter = new SubjectInheritingThread() { @Override - public void run() { + public void work() { long et = Time.now() + 5000; while (Time.now() < et) { try { 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java index d44727b4b65b6..badbbfa23d396 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -44,15 +45,15 @@ public class TestNativeIoInit { @Test @Timeout(value = 10) public void testDeadlockLinux() throws Exception { - Thread one = new Thread() { + Thread one = new SubjectInheritingThread() { @Override - public void run() { + public void work() { NativeIO.isAvailable(); } }; - Thread two = new Thread() { + Thread two = new SubjectInheritingThread() { @Override - public void run() { + public void work() { NativeIO.POSIX.isAvailable(); } }; @@ -66,15 +67,15 @@ public void run() { @Timeout(value = 10) public void testDeadlockWindows() throws Exception { assumeTrue(Path.WINDOWS, "Expected windows"); - Thread one = new Thread() { + SubjectInheritingThread one = new SubjectInheritingThread() { @Override - public void run() { + public void work() { NativeIO.isAvailable(); } }; - Thread two = new Thread() { + SubjectInheritingThread two = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { NativeIO.Windows.extendWorkingSetSize(100); } catch (IOException e) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java index a541ea99fcfdf..2533f0944dda7 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java @@ -28,6 +28,7 @@ import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; public class TestFailoverProxy { @@ -252,7 +253,7 @@ public String failsIfIdentifierDoesntMatch(String identifier) } - private static class ConcurrentMethodThread extends Thread { + private static class ConcurrentMethodThread extends SubjectInheritingThread { private UnreliableInterface unreliable; public String result; @@ -262,7 +263,7 @@ public ConcurrentMethodThread(UnreliableInterface unreliable) { } @Override - public void run() { + public void work() { try { result = unreliable.failsIfIdentifierDoesntMatch("impl2"); } catch (Exception e) { @@ -327,9 +328,9 @@ public void testFailoverBetweenMultipleStandbys() RetryPolicies.failoverOnNetworkException( RetryPolicies.TRY_ONCE_THEN_FAIL, 10, 1000, 10000)); - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep); impl1.setIdentifier("renamed-impl1"); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java index 1e0afe587ca96..fe98abce97787 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAsyncIPC.java @@ -30,6 +30,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.AsyncGetFuture; 
+import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -73,7 +74,7 @@ public void setupConf() { Client.setAsynchronousMode(true); } - static class AsyncCaller extends Thread { + static class AsyncCaller extends SubjectInheritingThread { private Client client; private InetSocketAddress server; private int count; @@ -96,7 +97,7 @@ static class AsyncCaller extends Thread { } @Override - public void run() { + public void work() { // In case Thread#Start is called, which will spawn new thread. Client.setAsynchronousMode(true); for (int i = 0; i < count; i++) { @@ -154,7 +155,7 @@ void assertReturnValues(long timeout, TimeUnit unit) * For testing the asynchronous calls of the RPC client * implemented with CompletableFuture. */ - static class AsyncCompletableFutureCaller extends Thread { + static class AsyncCompletableFutureCaller extends SubjectInheritingThread { private final Client client; private final InetSocketAddress server; private final int count; @@ -171,7 +172,7 @@ static class AsyncCompletableFutureCaller extends Thread { } @Override - public void run() { + public void work() { // Set the RPC client to use asynchronous mode. 
Client.setAsynchronousMode(true); long startTime = Time.monotonicNow(); @@ -204,7 +205,7 @@ public void assertReturnValues() } } - static class AsyncLimitlCaller extends Thread { + static class AsyncLimitlCaller extends SubjectInheritingThread { private Client client; private InetSocketAddress server; private int count; @@ -242,7 +243,7 @@ public AsyncLimitlCaller(int callerId, Client client, InetSocketAddress server, } @Override - public void run() { + public void work() { // in case Thread#Start is called, which will spawn new thread Client.setAsynchronousMode(true); for (int i = 0; i < count; i++) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java index bc607d762a3cd..044c60fe07450 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -151,7 +152,7 @@ public void assertCanTake(CallQueueManager cq, int numberOfTakes, int takeAttempts) throws InterruptedException { Taker taker = new Taker(cq, takeAttempts, -1); - Thread t = new Thread(taker); + Thread t = new SubjectInheritingThread(taker); t.start(); t.join(100); @@ -164,7 +165,7 @@ public void assertCanPut(CallQueueManager cq, int numberOfPuts, int putAttempts) throws InterruptedException { Putter putter = new Putter(cq, putAttempts, -1); - Thread t = new Thread(putter); + Thread t = new SubjectInheritingThread(putter); t.start(); t.join(100); @@ -277,7 
+278,7 @@ public void testSwapUnderContention() throws InterruptedException { // Create putters and takers for (int i=0; i < 1000; i++) { Putter p = new Putter(manager, -1, -1); - Thread pt = new Thread(p); + Thread pt = new SubjectInheritingThread(p); producers.add(p); threads.put(p, pt); @@ -286,7 +287,7 @@ public void testSwapUnderContention() throws InterruptedException { for (int i=0; i < 100; i++) { Taker t = new Taker(manager, -1, -1); - Thread tt = new Thread(t); + Thread tt = new SubjectInheritingThread(t); consumers.add(t); threads.put(t, tt); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java index 1afc88c562c8e..107a9f8587bf5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java @@ -50,6 +50,7 @@ import java.util.List; import java.util.concurrent.BlockingQueue; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException; @@ -684,7 +685,7 @@ public void assertCanTake(BlockingQueue cq, int numberOfTakes, CountDownLatch latch = new CountDownLatch(numberOfTakes); Taker taker = new Taker(cq, takeAttempts, "default", latch); - Thread t = new Thread(taker); + Thread t = new SubjectInheritingThread(taker); t.start(); latch.await(); @@ -698,7 +699,7 @@ public void assertCanPut(BlockingQueue cq, int numberOfPuts, CountDownLatch latch = new CountDownLatch(numberOfPuts); Putter putter = new Putter(cq, putAttempts, null, latch); - Thread t = new Thread(putter); + Thread t = new SubjectInheritingThread(putter); t.start(); latch.await(); diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index a191095b44516..b9a45fe28b825 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -103,6 +103,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -252,7 +253,7 @@ public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, } } - private static class SerialCaller extends Thread { + private static class SerialCaller extends SubjectInheritingThread { private Client client; private InetSocketAddress server; private int count; @@ -265,7 +266,7 @@ public SerialCaller(Client client, InetSocketAddress server, int count) { } @Override - public void run() { + public void work() { for (int i = 0; i < count; i++) { try { final long param = RANDOM.nextLong(); @@ -996,7 +997,7 @@ private void checkBlocking(int readers, int readerQ, int callQ) throws Exception // instantiate the threads, will start in batches Thread[] threads = new Thread[clients]; for (int i=0; i future = new FutureTask(clientCallable); - Thread clientThread = new Thread(future); + Thread clientThread = new SubjectInheritingThread(future); clientThread.start(); server.awaitInvocation(); @@ -146,7 +147,7 @@ public void testDeferredException() throws IOException, InterruptedException, new ClientCallable(serverAddress, conf, requestBytes); FutureTask future = new FutureTask(clientCallable); - Thread clientThread = new Thread(future); + Thread clientThread = new SubjectInheritingThread(future); clientThread.start(); 
server.awaitInvocation(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java index 51c66abb3fc26..50202214fe559 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java @@ -38,6 +38,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.SocksSocketFactory; import org.apache.hadoop.net.StandardSocketFactory; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -63,7 +64,7 @@ public class TestSocketFactory { private void startTestServer() throws Exception { // start simple tcp server. serverRunnable = new ServerRunnable(); - serverThread = new Thread(serverRunnable); + serverThread = new SubjectInheritingThread(serverRunnable); serverThread.start(); final long timeout = System.currentTimeMillis() + START_STOP_TIMEOUT_SEC * 1000; while (!serverRunnable.isReady()) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java index 33f9946e94d9e..69a732686dfae 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestSinkQueue.java @@ -21,6 +21,7 @@ import java.util.ConcurrentModificationException; import java.util.concurrent.CountDownLatch; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,8 +80,8 @@ private 
void testEmptyBlocking(int awhile) throws Exception { final SinkQueue q = new SinkQueue(2); final Runnable trigger = mock(Runnable.class); // try consuming emtpy equeue and blocking - Thread t = new Thread() { - @Override public void run() { + SubjectInheritingThread t = new SubjectInheritingThread() { + @Override public void work() { try { assertEquals(1, (int) q.dequeue(), "element"); q.consume(new Consumer() { @@ -255,8 +256,8 @@ private SinkQueue newSleepingConsumerQueue(int capacity, q.enqueue(i); } final CountDownLatch barrier = new CountDownLatch(1); - Thread t = new Thread() { - @Override public void run() { + SubjectInheritingThread t = new SubjectInheritingThread() { + @Override public void work() { try { Thread.sleep(10); // causes failure without barrier q.consume(new Consumer() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java index f423b57d1c3e2..0e5df58ded530 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java @@ -40,6 +40,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.util.Quantile; import org.apache.hadoop.thirdparty.com.google.common.math.Stats; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -209,7 +210,7 @@ interface TestProtocol { rates.add("metric" + i, 0); } - Thread[] threads = new Thread[n]; + SubjectInheritingThread[] threads = new SubjectInheritingThread[n]; final CountDownLatch firstAddsFinished = new CountDownLatch(threads.length); final CountDownLatch firstSnapshotsFinished = new CountDownLatch(1); final CountDownLatch 
secondAddsFinished = @@ -220,9 +221,9 @@ interface TestProtocol { final Random sleepRandom = new Random(seed); for (int tIdx = 0; tIdx < threads.length; tIdx++) { final int threadIdx = tIdx; - threads[threadIdx] = new Thread() { + threads[threadIdx] = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { for (int i = 0; i < 1000; i++) { rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java index 2110f33981dde..7429e7e525a9a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/source/TestJvmMetrics.java @@ -39,6 +39,7 @@ import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JvmPauseMonitor; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Timeout; import java.util.ArrayList; @@ -296,11 +297,11 @@ private static void updateThreadsAndWait(List threads, } } - static class TestThread extends Thread { + static class TestThread extends SubjectInheritingThread { private volatile boolean exit = false; private boolean exited = false; @Override - public void run() { + public void work() { while (!exit) { try { Thread.sleep(1000); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java index a1704a0ec3013..f5f253eec89b3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java @@ -47,6 +47,7 @@ import org.apache.hadoop.net.unix.DomainSocket.DomainChannel; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.io.Files; @@ -457,8 +458,8 @@ void testClientServer1(final Class writeStrategyClass, new ArrayBlockingQueue(2); final DomainSocket serv = (preConnectedSockets != null) ? null : DomainSocket.bindAndListen(TEST_PATH); - Thread serverThread = new Thread() { - public void run(){ + Thread serverThread = new SubjectInheritingThread() { + public void work(){ // Run server DomainSocket conn = null; try { @@ -485,8 +486,8 @@ public void run(){ }; serverThread.start(); - Thread clientThread = new Thread() { - public void run(){ + SubjectInheritingThread clientThread = new SubjectInheritingThread() { + public void work(){ try { DomainSocket client = preConnectedSockets != null ? 
preConnectedSockets[1] : DomainSocket.connect(TEST_PATH); @@ -626,8 +627,8 @@ public void testFdPassing() throws Exception { for (int i = 0; i < passedFiles.length; i++) { passedFds[i] = passedFiles[i].getInputStream().getFD(); } - Thread serverThread = new Thread() { - public void run(){ + Thread serverThread = new SubjectInheritingThread() { + public void work(){ // Run server DomainSocket conn = null; try { @@ -649,8 +650,8 @@ public void run(){ }; serverThread.start(); - Thread clientThread = new Thread() { - public void run(){ + Thread clientThread = new SubjectInheritingThread() { + public void work(){ try { DomainSocket client = DomainSocket.connect(TEST_PATH); OutputStream clientOutputStream = client.getOutputStream(); @@ -783,7 +784,7 @@ public void run() { } } }; - Thread readerThread = new Thread(reader); + Thread readerThread = new SubjectInheritingThread(reader); readerThread.start(); socks[0].getOutputStream().write(1); socks[0].getOutputStream().write(2); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java index f78005a6ed3f2..58fe3f44ecf90 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java @@ -32,6 +32,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -128,7 +129,7 @@ public void testStress() throws Exception { final ArrayList pairs = new ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new Thread(new Runnable() { + final Thread adderThread = new 
SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -155,7 +156,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new Thread(new Runnable() { + final Thread removerThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { final Random random = new Random(); @@ -199,7 +200,7 @@ public void testStressInterruption() throws Exception { final ArrayList pairs = new ArrayList(); final AtomicInteger handled = new AtomicInteger(0); - final Thread adderThread = new Thread(new Runnable() { + final Thread adderThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -227,7 +228,7 @@ public boolean handle(DomainSocket sock) { } }); - final Thread removerThread = new Thread(new Runnable() { + final Thread removerThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { final Random random = new Random(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java index fe6bc4f58de93..136e28b56d4a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthorizationContext.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.security; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -42,7 +43,7 @@ public void testClearAuthorizationHeader() { public void testThreadLocalIsolation() throws Exception { byte[] mainHeader = "main-thread".getBytes(); AuthorizationContext.setCurrentAuthorizationHeader(mainHeader); - Thread t = new Thread(() -> { + SubjectInheritingThread t = new SubjectInheritingThread(() -> { 
Assertions.assertNull(AuthorizationContext.getCurrentAuthorizationHeader()); byte[] threadHeader = "other-thread".getBytes(); AuthorizationContext.setCurrentAuthorizationHeader(threadHeader); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java index 5a2927f71c18e..74f00f9d8e91f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java @@ -31,6 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.FakeTimer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -406,10 +407,10 @@ public void testOnlyOneRequestWhenNoEntryIsCached() throws Exception { FakeGroupMapping.clearBlackList(); FakeGroupMapping.setGetGroupsDelayMs(100); - ArrayList threads = new ArrayList(); + ArrayList threads = new ArrayList(); for (int i = 0; i < 10; i++) { - threads.add(new Thread() { - public void run() { + threads.add(new SubjectInheritingThread() { + public void work() { try { assertEquals(2, groups.getGroups("me").size()); } catch (IOException e) { @@ -451,10 +452,10 @@ public void testOnlyOneRequestWhenExpiredEntryExists() throws Exception { timer.advance(400 * 1000); Thread.sleep(100); - ArrayList threads = new ArrayList(); + ArrayList threads = new ArrayList(); for (int i = 0; i < 10; i++) { - threads.add(new Thread() { - public void run() { + threads.add(new SubjectInheritingThread() { + public void work() { try { assertEquals(2, groups.getGroups("me").size()); } catch (IOException e) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java index 1bb43ffcd5eb5..9d0661af3e028 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java @@ -59,6 +59,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.security.alias.JavaKeyStoreProvider; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -414,7 +415,7 @@ public void testLdapConnectionTimeout() // Below we create a LDAP server which will accept a client request; // but it will never reply to the bind (connect) request. // Client of this LDAP server is expected to get a connection timeout. - final Thread ldapServer = new Thread(new Runnable() { + final Thread ldapServer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -469,7 +470,7 @@ public void testLdapReadTimeout() throws IOException, InterruptedException { // authenticate it successfully; but it will never reply to the following // query request. // Client of this LDAP server is expected to get a read timeout. 
- final Thread ldapServer = new Thread(new Runnable() { + final Thread ldapServer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java index 2fb5b6c22eb71..626021b18c6b0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java @@ -33,6 +33,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -1023,12 +1024,12 @@ public Void run() throws Exception { }}); } - static class GetTokenThread extends Thread { + static class GetTokenThread extends SubjectInheritingThread { boolean runThread = true; volatile ConcurrentModificationException cme = null; @Override - public void run() { + public void work() { while(runThread) { try { UserGroupInformation.getCurrentUser().getCredentials(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java index ccbc0a009fbf5..67f6902e301bb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceLifecycle.java @@ -25,6 +25,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceStateChangeListener; import 
org.apache.hadoop.service.ServiceStateException; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -404,7 +405,7 @@ private AsyncSelfTerminatingService(int timeout) { @Override protected void serviceStart() throws Exception { - new Thread(this).start(); + new SubjectInheritingThread(this).start(); super.serviceStart(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java index 3093aa1ff5d58..6f917467f2b34 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/testservices/RunningService.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,7 +59,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - Thread thread = new Thread(this); + Thread thread = new SubjectInheritingThread(this); thread.setName(getName()); thread.start(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java index e270ee68000eb..a8dbd395b82f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MultithreadedTestUtil.java @@ -21,6 +21,7 @@ import java.util.Set; 
import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -175,7 +176,7 @@ public Iterable getTestThreads() { * A thread that can be added to a test context, and properly * passes exceptions through. */ - public static abstract class TestingThread extends Thread { + public static abstract class TestingThread extends SubjectInheritingThread { protected final TestContext ctx; protected boolean stopped; @@ -184,7 +185,7 @@ public TestingThread(TestContext ctx) { } @Override - public void run() { + public void work() { try { doWork(); } catch (Throwable t) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java index 6805dcd2fd4b3..1b6ddcc263afe 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestTimedOutTestsListener.java @@ -23,6 +23,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -59,7 +60,7 @@ public Deadlock() { } } - class DeadlockThread extends Thread { + class DeadlockThread extends SubjectInheritingThread { private Lock lock1 = null; private Lock lock2 = null; @@ -84,7 +85,7 @@ class DeadlockThread extends Thread { this.useSync = false; } - public void run() { + public void work() { if (useSync) { syncLock(); } else { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java index 
90beb58aee449..6080c1b37e82d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestAutoCloseableLock.java @@ -21,6 +21,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; + import org.junit.jupiter.api.Test; /** * A test class for AutoCloseableLock. @@ -54,9 +56,9 @@ public void testMultipleThread() throws Exception { AutoCloseableLock lock = new AutoCloseableLock(); lock.acquire(); assertTrue(lock.isLocked()); - Thread competingThread = new Thread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertTrue(lock.isLocked()); assertFalse(lock.tryLock()); } @@ -79,9 +81,9 @@ public void testTryWithResourceSyntax() throws Exception { try(AutoCloseableLock localLock = lock.acquire()) { assertEquals(localLock, lock); assertTrue(lock.isLocked()); - Thread competingThread = new Thread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertTrue(lock.isLocked()); assertFalse(lock.tryLock()); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java index fb9e773c7d06e..0ff2bdeafc388 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedLock.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; 
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -52,9 +53,9 @@ public void testMultipleThread(TestInfo testInfo) throws Exception { InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300); lock.lock(); try { - Thread competingThread = new Thread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertFalse(lock.tryLock()); } }; @@ -89,9 +90,9 @@ public void unlock() { AutoCloseableLock acl = new AutoCloseableLock(lock); try (AutoCloseable localLock = acl.acquire()) { assertEquals(acl, localLock); - Thread competingThread = new Thread() { + SubjectInheritingThread competingThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertNotEquals(Thread.currentThread(), lockThread.get()); assertFalse(lock.tryLock()); } @@ -253,7 +254,7 @@ void logWaitWarning(long lockHeldTime, SuppressedSnapshot stats) { private Thread lockUnlockThread(Lock lock) throws InterruptedException { CountDownLatch countDownLatch = new CountDownLatch(1); - Thread t = new Thread(() -> { + Thread t = new SubjectInheritingThread(() -> { try { assertFalse(lock.tryLock()); countDownLatch.countDown(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java index 6bb5d08e154df..aaf75234b0ecc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestInstrumentedReadWriteLock.java @@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.api.Timeout; @@ -68,17 +69,17 @@ public void release() { final AutoCloseableLock readLock = new AutoCloseableLock( readWriteLock.readLock()); try (AutoCloseableLock lock = writeLock.acquire()) { - Thread competingWriteThread = new Thread() { + Thread competingWriteThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertFalse(writeLock.tryLock()); } }; competingWriteThread.start(); competingWriteThread.join(); - Thread competingReadThread = new Thread() { + Thread competingReadThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertFalse(readLock.tryLock()); }; }; @@ -104,18 +105,18 @@ public void testReadLock(TestInfo testInfo) throws Exception { final AutoCloseableLock writeLock = new AutoCloseableLock( readWriteLock.writeLock()); try (AutoCloseableLock lock = readLock.acquire()) { - Thread competingReadThread = new Thread() { + SubjectInheritingThread competingReadThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertTrue(readLock.tryLock()); readLock.release(); } }; competingReadThread.start(); competingReadThread.join(); - Thread competingWriteThread = new Thread() { + SubjectInheritingThread competingWriteThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { assertFalse(writeLock.tryLock()); } }; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java index cee0fcef092f2..60ebc02398e93 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java @@ -29,6 +29,7 @@ import java.util.zip.CRC32; import java.util.zip.Checksum; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -316,7 +317,7 @@ private static BenchResult doBench(Class clazz, final int numThreads, final byte[] bytes, final int size) throws Exception { - final Thread[] threads = new Thread[numThreads]; + final SubjectInheritingThread[] threads = new SubjectInheritingThread[numThreads]; final BenchResult[] results = new BenchResult[threads.length]; { @@ -326,11 +327,11 @@ private static BenchResult doBench(Class clazz, for(int i = 0; i < threads.length; i++) { final int index = i; - threads[i] = new Thread() { + threads[i] = new SubjectInheritingThread() { final Checksum crc = ctor.newInstance(); @Override - public void run() { + public void work() { final long st = System.nanoTime(); crc.reset(); for (int i = 0; i < trials; i++) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java index 7d8bece0d675d..89cc5eefa86cd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java @@ -31,6 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -70,11 +71,11 @@ private void doTestCache() { @Test public void testThreadSafe() throws Exception { - Thread[] th = new Thread[32]; + 
SubjectInheritingThread[] th = new SubjectInheritingThread[32]; for (int i=0; i void waitForAll(List> furtures) throws Exception { } } - static class AllocatorThread extends Thread { + static class AllocatorThread extends SubjectInheritingThread { private final ByteArrayManager bam; private final int arrayLength; private byte[] array; @@ -237,7 +238,7 @@ static class AllocatorThread extends Thread { } @Override - public void run() { + public void work() { try { array = bam.newByteArray(arrayLength); } catch (InterruptedException e) { @@ -333,9 +334,9 @@ public void testByteArrayManager() throws Exception { } final List exceptions = new ArrayList(); - final Thread randomRecycler = new Thread() { + final Thread randomRecycler = new SubjectInheritingThread() { @Override - public void run() { + public void work() { LOG.info("randomRecycler start"); for(int i = 0; shouldRun(); i++) { final int j = ThreadLocalRandom.current().nextInt(runners.length); @@ -524,7 +525,7 @@ public void run() { Thread start(int n) { this.n = n; - final Thread t = new Thread(this); + final Thread t = new SubjectInheritingThread(this); t.start(); return t; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java index cbbcccf3ca0ba..1298252740eb5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java @@ -22,6 +22,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,7 +48,7 @@ public AsyncDataService() { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - 
return new Thread(threadGroup, r); + return new SubjectInheritingThread(threadGroup, r); } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java index 46c16d3c7fa60..8a0242678e5ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; // TODO: convert this to Junit public class TestUdpServer { @@ -68,16 +69,16 @@ public static void main(String[] args) throws InterruptedException { //testDump(); } - static class Runtest1 extends Thread { + static class Runtest1 extends SubjectInheritingThread { @Override - public void run() { + public void work() { testGetportMount(); } } - static class Runtest2 extends Thread { + static class Runtest2 extends SubjectInheritingThread { @Override - public void run() { + public void work() { testDump(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java index 91af1ca06ac7e..aaba9992a257c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/RouterResolver.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer; import 
org.apache.hadoop.hdfs.server.federation.store.MembershipStore; import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -97,7 +98,7 @@ private synchronized void updateSubclusterMapping() { if (subclusterMapping == null || (monotonicNow() - lastUpdated) > minUpdateTime) { // Fetch the mapping asynchronously - Thread updater = new Thread(new Runnable() { + Thread updater = new SubjectInheritingThread(new Runnable() { @Override public void run() { final MembershipStore membershipStore = getMembershipStore(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 2ffc5f0b5d893..eb2a466081908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.eclipse.jetty.util.ajax.JSON; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -458,7 +459,7 @@ public void run() { /** * Thread that creates connections asynchronously. */ - static class ConnectionCreator extends Thread { + static class ConnectionCreator extends SubjectInheritingThread { /** If the creator is running. */ private boolean running = true; /** Queue to push work to. 
*/ @@ -470,7 +471,7 @@ static class ConnectionCreator extends Thread { } @Override - public void run() { + public void work() { while (this.running) { try { ConnectionPool pool = this.queue.take(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java index 40ff843fa1dfe..a7c1fb6caabb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java @@ -25,13 +25,14 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RefreshMountTableEntriesResponse; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for updating mount table cache on all the router. */ -public class MountTableRefresherThread extends Thread { +public class MountTableRefresherThread extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory.getLogger(MountTableRefresherThread.class); private boolean success; @@ -61,7 +62,7 @@ public MountTableRefresherThread(MountTableManager manager, * update cache on R2 and R3. 
*/ @Override - public void run() { + public void work() { try { SecurityUtil.doAsLoginUser(() -> { if (UserGroupInformation.isSecurityEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 14cc47ffa1e6e..40697fc14a4c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -59,6 +59,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -391,9 +392,9 @@ protected void serviceStop() throws Exception { * Shutdown the router. 
*/ public void shutDown() { - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { Router.this.stop(); } }.start(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java index 5607ab8109d26..96c5dd13d9d28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; import org.apache.hadoop.hdfs.server.federation.store.records.RouterState; import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,7 +64,7 @@ public RouterHeartbeatService(Router router) { * Trigger the update of the Router state asynchronously. 
*/ protected void updateStateAsync() { - Thread thread = new Thread(this::updateStateStore, "Router Heartbeat Async"); + Thread thread = new SubjectInheritingThread(this::updateStateStore, "Router Heartbeat Async"); thread.setDaemon(true); thread.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 1b8c02fd72e28..c0f57336a7ac2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -214,6 +214,7 @@ import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -2507,7 +2508,7 @@ private static class AsyncThreadFactory implements ThreadFactory { @Override public Thread newThread(@NonNull Runnable r) { - Thread thread = new Thread(r, namePrefix + threadNumber.getAndIncrement()); + Thread thread = new SubjectInheritingThread(r, namePrefix + threadNumber.getAndIncrement()); thread.setDaemon(true); return thread; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index 3650ce60f7221..2eaa71076d03b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -133,7 +134,7 @@ public void testConcurrentRefreshRequests() throws InterruptedException { // Spawn 100 concurrent refresh requests Thread[] threads = new Thread[100]; for (int i = 0; i < 100; i++) { - threads[i] = new Thread(() -> + threads[i] = new SubjectInheritingThread(() -> client.refreshFairnessPolicyController(routerContext.getConf())); } @@ -182,7 +183,7 @@ public void testRefreshStaticChangeHandlers() throws Exception { final int newNs1Permits = 4; conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns0", newNs0Permits); conf.setInt(DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX + "ns1", newNs1Permits); - Thread threadRefreshController = new Thread(() -> client. + Thread threadRefreshController = new SubjectInheritingThread(() -> client. 
refreshFairnessPolicyController(routerContext.getConf())); threadRefreshController.start(); threadRefreshController.join(); @@ -218,7 +219,7 @@ private List makeDummyInvocations(RouterRpcClient client, final int nThr RemoteMethod dummyMethod = Mockito.mock(RemoteMethod.class); List threadAcquirePermits = new ArrayList<>(); for (int i = 0; i < nThreads; i++) { - Thread threadAcquirePermit = new Thread(() -> { + Thread threadAcquirePermit = new SubjectInheritingThread(() -> { try { client.invokeSingle(namespace, dummyMethod); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java index b43c87591d76c..0036d9044d064 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java @@ -45,6 +45,7 @@ import org.apache.hadoop.security.GroupMappingServiceProvider; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -318,7 +319,7 @@ public void testCounter() throws Exception { int expectedSchedulerCount = rpcServer.getSchedulerJobCount() + 1; AtomicInteger maxSchedulerCount = new AtomicInteger(); AtomicBoolean watch = new AtomicBoolean(true); - Thread watcher = new Thread(() -> { + Thread watcher = new SubjectInheritingThread(() -> { while (watch.get()) { int schedulerCount = rpcServer.getSchedulerJobCount(); if (schedulerCount > maxSchedulerCount.get()) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java index 373743299f032..3857b8ebbfacf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java @@ -324,7 +324,7 @@ protected MountTableRefresherThread getLocalRefresher( String adminAddress) { return new MountTableRefresherThread(null, adminAddress) { @Override - public void run() { + public void work() { try { // Sleep 1 minute Thread.sleep(60000); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index ddbfdc9727c3a..648104a01e9a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -152,6 +152,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * The the RPC interface of the {@link Router} implemented by @@ -2393,7 +2394,7 @@ public void testCallerContextNotResetByAsyncHandler() throws IOException { String dirPath = "/test"; // The reason we start this child thread is that CallContext use InheritableThreadLocal. - Thread t1 = new Thread(() -> { + SubjectInheritingThread t1 = new SubjectInheritingThread(() -> { // Set flag async:true. 
CallerContext.setCurrent( new CallerContext.Builder("async:true").build()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java index 805b955661d5c..51fd4486a6e47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/SyncClass.java @@ -24,6 +24,8 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; + /** * SyncClass implements BaseClass, providing a synchronous * version of the methods. All operations are performed in a @@ -186,7 +188,7 @@ public String timeConsumingMethod(int input) { private ExecutorService getExecutorService() { return Executors.newFixedThreadPool(2, r -> { - Thread t = new Thread(r); + SubjectInheritingThread t = new SubjectInheritingThread(r); t.setDaemon(true); return t; }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index ed29578ee0cf9..1d680702cdcb7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -130,7 +130,7 @@ import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; - +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.classification.VisibleForTesting; 
import org.apache.hadoop.util.Preconditions; @@ -5641,7 +5641,7 @@ public int getBlockOpQueueLength() { return blockReportThread.queue.size(); } - private class BlockReportProcessingThread extends Thread { + private class BlockReportProcessingThread extends SubjectInheritingThread { private long lastFull = 0; private final BlockingQueue queue; @@ -5653,7 +5653,7 @@ private class BlockReportProcessingThread extends Thread { } @Override - public void run() { + public void work() { try { processQueue(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 83c179bfe653f..066d6d9a5c2bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.util.RwLockMode; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,7 +66,7 @@ * starts up, and at configurable intervals afterwards. 
*/ @InterfaceAudience.LimitedPrivate({"HDFS"}) -public class CacheReplicationMonitor extends Thread implements Closeable { +public class CacheReplicationMonitor extends SubjectInheritingThread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(CacheReplicationMonitor.class); @@ -159,7 +160,7 @@ public CacheReplicationMonitor(FSNamesystem namesystem, } @Override - public void run() { + public void work() { long startTimeMs = 0; Thread.currentThread().setName("CacheReplicationMonitor(" + System.identityHashCode(this) + ")"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java index 798b5fb5966f7..4526d14a73b4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports.DiskOp; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Timer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -152,7 +153,7 @@ public void checkAndUpdateReportIfNecessary() { public void updateSlowDiskReportAsync(long now) { if (isUpdateInProgress.compareAndSet(false, true)) { lastUpdateTime = now; - new Thread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { slowDisksReport = getSlowDisks(diskIDLatencyMap, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 93303bcf807de..738eded1730a4 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -53,6 +53,7 @@ import org.apache.hadoop.io.nativeio.NativeIOException; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; @@ -849,8 +850,8 @@ private void deleteAsync(File curDir) throws IOException { deleteDir(curTmp); } rename(curDir, curTmp); - new Thread("Async Delete Current.tmp") { - public void run() { + new SubjectInheritingThread("Async Delete Current.tmp") { + public void work() { try { deleteDir(curTmp); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 45eeac85d6b36..2ceca34edd954 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -75,6 +75,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.apache.hadoop.classification.VisibleForTesting; @@ -599,7 +600,7 @@ void start() { //Thread is started already return; } - bpThread = new Thread(this); + bpThread = new SubjectInheritingThread(this); bpThread.setDaemon(true); // needed for JUnit testing if (lifelineSender != null) { @@ -1078,7 +1079,7 @@ public void run() { } public void start() { - lifelineThread = new Thread(this, + lifelineThread = new SubjectInheritingThread(this, 
formatThreadName("lifeline", lifelineNnAddr)); lifelineThread.setDaemon(true); lifelineThread.setUncaughtExceptionHandler( @@ -1384,7 +1385,7 @@ public long monotonicNow() { /** * CommandProcessingThread that process commands asynchronously. */ - class CommandProcessingThread extends Thread { + class CommandProcessingThread extends SubjectInheritingThread { private final BPServiceActor actor; private final BlockingQueue queue; @@ -1396,7 +1397,7 @@ class CommandProcessingThread extends Thread { } @Override - public void run() { + public void work() { try { processQueue(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index ef778791cfd9c..3a1b1e07f3682 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -262,6 +262,7 @@ import org.apache.hadoop.tracing.TraceUtils; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tracing.Tracer; import org.eclipse.jetty.util.ajax.JSON; @@ -3855,8 +3856,8 @@ public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException // Asynchronously start the shutdown process so that the rpc response can be // sent back. - Thread shutdownThread = new Thread("Async datanode shutdown thread") { - @Override public void run() { + Thread shutdownThread = new SubjectInheritingThread("Async datanode shutdown thread") { + @Override public void work() { if (!shutdownForUpgrade) { // Delay the shutdown a bit if not doing for restart. 
try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index d8f1e23ec379b..77e30b85675e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,7 +53,7 @@ * VolumeScanner scans a single volume. Each VolumeScanner has its own thread. *

They are all managed by the DataNode's BlockScanner. */ -public class VolumeScanner extends Thread { +public class VolumeScanner extends SubjectInheritingThread { public static final Logger LOG = LoggerFactory.getLogger(VolumeScanner.class); @@ -633,7 +634,7 @@ private synchronized ExtendedBlock popNextSuspectBlock() { } @Override - public void run() { + public void work() { // Record the minute on which the scanner started. this.startMinute = TimeUnit.MINUTES.convert(Time.monotonicNow(), TimeUnit.MILLISECONDS); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index e5b23bb60e516..f6caea96346fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -109,7 +110,7 @@ public Thread newThread(Runnable r) { synchronized (this) { thisIndex = counter++; } - Thread t = new Thread(r); + Thread t = new SubjectInheritingThread(r); t.setName("Async disk worker #" + thisIndex + " for volume " + volume); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index 
262a24bd3aa45..a3d54865de048 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -49,6 +49,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; class FsVolumeList { private final CopyOnWriteArrayList volumes = @@ -260,8 +261,8 @@ void getAllVolumesMap(final String bpid, new ConcurrentHashMap(); List replicaAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new Thread() { - public void run() { + Thread t = new SubjectInheritingThread() { + public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Adding replicas to map for block pool " + bpid + " on volume " + v + "..."); @@ -507,8 +508,8 @@ void addBlockPool(final String bpid, final Configuration conf) throws IOExceptio new ConcurrentHashMap(); List blockPoolAddingThreads = new ArrayList(); for (final FsVolumeImpl v : volumes) { - Thread t = new Thread() { - public void run() { + Thread t = new SubjectInheritingThread() { + public void work() { try (FsVolumeReference ref = v.obtainReference()) { FsDatasetImpl.LOG.info("Scanning block pool " + bpid + " on volume " + v + "..."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java index 0d42ae99e358e..02b78b24ca2cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -82,7 +83,7 @@ private void addExecutorForVolume(final String storageId) { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(threadGroup, r); + Thread t = new SubjectInheritingThread(threadGroup, r); t.setName("Async RamDisk lazy persist worker " + " for volume with id " + storageId); return t; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java index 115e9485fa0a9..8316a4c52293a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -78,7 +79,7 @@ private boolean isSyncThreadAlive() { private void startSyncThread() { synchronized(syncThreadLock) { if (!isSyncThreadAlive()) { - syncThread = new Thread(this, this.getClass().getSimpleName()); + syncThread = new SubjectInheritingThread(this, this.getClass().getSimpleName()); syncThread.start(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index fa321fe85e57b..e6a6fbfc0a8ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -76,6 +76,7 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.Preconditions; @@ -1247,7 +1248,7 @@ private synchronized void saveFSImageInAllDirs(FSNamesystem source, = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) { StorageDirectory sd = it.next(); FSImageSaver saver = new FSImageSaver(ctx, sd, nnf); - Thread saveThread = new Thread(saver, saver.toString()); + Thread saveThread = new SubjectInheritingThread(saver, saver.toString()); saveThreads.add(saveThread); saveThread.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java index edacb7eaafd00..c84f3266dcfca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java @@ -75,6 +75,7 @@ import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @@ -184,7 +185,7 @@ public LoaderContext getLoaderContext() { * 
Thread to compute the MD5 of a file as this can be in parallel while * loading the image without interfering much. */ - private static class DigestThread extends Thread { + private static class DigestThread extends SubjectInheritingThread { /** * Exception thrown when computing the digest if it cannot be calculated. @@ -219,7 +220,7 @@ public IOException getException() { } @Override - public void run() { + public void work() { try { digest = MD5FileUtils.computeMd5ForFile(file); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index d48941203d3f0..7f1ed20416341 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -103,6 +103,7 @@ import org.apache.hadoop.util.GcTimeMonitor.Builder; import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.util.Timer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1076,7 +1077,7 @@ public FileSystem run() throws IOException { return dfs; } }); - this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); + this.emptier = new SubjectInheritingThread(new Trash(fs, conf).getEmptier(), "Trash Emptier"); this.emptier.setDaemon(true); this.emptier.start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index cf416307f47d4..472f1536526cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -37,6 +37,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.Timer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -475,7 +476,7 @@ void sleep(long sleepTimeMillis) throws InterruptedException { * The thread which does the actual work of tailing edits journals and * applying the transactions to the FSNS. */ - private class EditLogTailerThread extends Thread { + private class EditLogTailerThread extends SubjectInheritingThread { private volatile boolean shouldRun = true; private EditLogTailerThread() { @@ -487,7 +488,7 @@ private void setShouldRun(boolean shouldRun) { } @Override - public void run() { + public void work() { SecurityUtil.doAsLoginUserOrFatal( new PrivilegedAction() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index e240921f67066..e93a384c99d37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -50,6 +50,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; @@ -386,7 +387,7 @@ private long countUncheckpointedTxns() { 
img.getStorage().getMostRecentCheckpointTxId(); } - private class CheckpointerThread extends Thread { + private class CheckpointerThread extends SubjectInheritingThread { private volatile boolean shouldRun = true; private volatile long preventCheckpointsUntil = 0; @@ -399,7 +400,7 @@ private void setShouldRun(boolean shouldRun) { } @Override - public void run() { + public void work() { // We have to make sure we're logged in as far as JAAS // is concerned, in order to use kerberized SSL properly. SecurityUtil.doAsLoginUserOrFatal( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java index 9a4d866e117d1..20b2641648cfd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java @@ -51,6 +51,7 @@ import org.junit.jupiter.api.Test; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -448,7 +449,7 @@ void start() { Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING)); if (thread.get() == null) { - final Thread t = new Thread(null, new Runnable() { + final Thread t = new SubjectInheritingThread(null, new Runnable() { @Override public void run() { for(State s; !(s = checkErrorState()).isTerminated;) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index f277b1a37b8d6..5462d190c105e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; @@ -521,8 +522,8 @@ public boolean skipRollingRestartWait() { .getWrappedStream(); final AtomicBoolean running = new AtomicBoolean(true); final AtomicBoolean failed = new AtomicBoolean(false); - Thread t = new Thread() { - public void run() { + SubjectInheritingThread t = new SubjectInheritingThread() { + public void work() { while (running.get()) { try { out.write("test".getBytes()); @@ -866,7 +867,7 @@ public Boolean get() { dataNodes[0].shutdown(); // Shutdown the second datanode when the pipeline is closing. 
- new Thread(() -> { + new SubjectInheritingThread(() -> { try { GenericTestUtils.waitFor(new Supplier() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 8eb2f588228f0..a1eb0c56af914 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -90,6 +90,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -729,7 +730,7 @@ private boolean busyTest(int xcievers, int threads, int fileLen, int timeWin, in Counter counter = new Counter(0); for (int i = 0; i < threads; ++i ) { DFSClientReader reader = new DFSClientReader(file1, cluster, hash_sha, fileLen, counter); - readers[i] = new Thread(reader); + readers[i] = new SubjectInheritingThread(reader); readers[i].start(); } @@ -1018,7 +1019,7 @@ public static void namenodeRestartTest(final Configuration conf, assertFalse(HdfsUtils.isHealthy(uri)); //namenode is down, continue writing file4 in a thread - final Thread file4thread = new Thread(new Runnable() { + final Thread file4thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1037,7 +1038,7 @@ public void run() { file4thread.start(); //namenode is down, read the file in a thread - final Thread reader = new Thread(new Runnable() { + final Thread reader = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1057,7 +1058,7 @@ public void run() { //namenode is down, create another file in a thread final Path file3 = new 
Path(dir, "file"); - final Thread thread = new Thread(new Runnable() { + final Thread thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1072,7 +1073,7 @@ public void run() { thread.start(); //restart namenode in a new thread - new Thread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1125,7 +1126,7 @@ public void run() { assertFalse(HdfsUtils.isHealthy(uri)); //leave safe mode in a new thread - new Thread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -1306,7 +1307,7 @@ public void delayWhenRenewLeaseTimeout() { out1.write(new byte[256]); - Thread closeThread = new Thread(new Runnable() { + Thread closeThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { //1. trigger get LeaseRenewer lock diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java index d52b53d543206..f82bb16ee84c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java @@ -60,6 +60,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -339,7 +340,7 @@ public void testCongestionAckDelay() { AtomicBoolean isDelay = new AtomicBoolean(true); // ResponseProcessor needs the dataQueue for the next step. 
- new Thread(() -> { + new SubjectInheritingThread(() -> { for (int i = 0; i < 10; i++) { // In order to ensure that other threads run for a period of time to prevent affecting // the results. @@ -376,7 +377,7 @@ public void testCongestionAckDelay() { // The purpose of adding packets to the dataQueue is to make the DataStreamer run // normally and judge whether to enter the sleep state according to the congestion. - new Thread(() -> { + new SubjectInheritingThread(() -> { for (int i = 0; i < 100; i++) { packet[i] = mock(DFSPacket.class); dataQueue.add(packet[i]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 2598dd44a374f..3188185a3e1c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -70,6 +70,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.slf4j.event.Level; @@ -948,9 +949,9 @@ public void testTailWithFresh() throws Exception { final ByteArrayOutputStream out = new ByteArrayOutputStream(); System.setOut(new PrintStream(out)); - final Thread tailer = new Thread() { + final SubjectInheritingThread tailer = new SubjectInheritingThread() { @Override - public void run() { + public void work() { final String[] argv = new String[]{"-tail", "-f", testFile.toString()}; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index c9c7020ed6c95..1b49267ab8942 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -61,7 +62,7 @@ public class TestDatanodeDeath { // // an object that does a bunch of transactions // - static class Workload extends Thread { + static class Workload extends SubjectInheritingThread { private final short replication; private final int numberOfFiles; private final int id; @@ -81,7 +82,7 @@ static class Workload extends Thread { // create a bunch of files. Write to them and then verify. @Override - public void run() { + public void work() { System.out.println("Workload starting "); for (int i = 0; i < numberOfFiles; i++) { Path filename = new Path(id + "." + i); @@ -210,7 +211,7 @@ private static void checkData(byte[] actual, int from, byte[] expected, String m * a block do not get killed (otherwise the file will be corrupt and the * test will fail). 
*/ - class Modify extends Thread { + class Modify extends SubjectInheritingThread { volatile boolean running; final MiniDFSCluster cluster; final Configuration conf; @@ -222,7 +223,7 @@ class Modify extends Thread { } @Override - public void run() { + public void work() { while (running) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java index df3fc4f8c4370..c7436f0f2fe31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeadNodeDetection.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -456,7 +457,7 @@ public void sync() { } private void startWaitForDeadNodeThread(DFSClient dfsClient, int size) { - new Thread(() -> { + new SubjectInheritingThread(() -> { DeadNodeDetector deadNodeDetector = dfsClient.getClientContext().getDeadNodeDetector(); while (deadNodeDetector.clearAndGetDetectedDeadNodes().size() != size) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 63f8dc226980b..9715083c373b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -86,6 +86,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; 
+import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -864,7 +865,7 @@ public void testDecommissionWithOpenfileReporting() closedFileSet, openFilesMap, maxDnOccurance); final AtomicBoolean stopRedundancyMonitor = new AtomicBoolean(false); - Thread monitorThread = new Thread(new Runnable() { + Thread monitorThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while (!stopRedundancyMonitor.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index 16e1ea25b4b7a..8a8361661f089 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.MethodOrderer; @@ -259,8 +260,8 @@ public void testDecommissionWithURBlockForSameBlockGroup() throws Exception { // Decommission node in a new thread. Verify that node is decommissioned. final CountDownLatch decomStarted = new CountDownLatch(0); - Thread decomTh = new Thread() { - public void run() { + SubjectInheritingThread decomTh = new SubjectInheritingThread() { + public void work() { try { decomStarted.countDown(); decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED); @@ -995,7 +996,7 @@ public void testDecommissionWithMissingBlock() throws Exception { // Handle decommission nodes in a new thread. 
// Verify that nodes are decommissioned. final CountDownLatch decomStarted = new CountDownLatch(0); - new Thread( + new SubjectInheritingThread( () -> { try { decomStarted.countDown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java index b7916ec560730..f74ac5ee33288 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java @@ -42,6 +42,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -372,7 +373,7 @@ public void testSimpleAppend2() throws Exception { // // an object that does a bunch of appends to files // - class Workload extends Thread { + class Workload extends SubjectInheritingThread { private final int id; private final MiniDFSCluster cluster; private final boolean appendToNewBlock; @@ -385,7 +386,7 @@ class Workload extends Thread { // create a bunch of files. Write to them and then verify. @Override - public void run() { + public void work() { System.out.println("Workload " + id + " starting... 
"); for (int i = 0; i < numAppendsPerThread; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java index 5b3a96305a77d..936a90d9e4da0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.mockito.invocation.InvocationOnMock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; @@ -552,9 +553,9 @@ public HdfsFileStatus answer(InvocationOnMock invocation){ DFSClientAdapter.setDFSClient(fs, spyClient); // Create two threads for doing appends to the same file. - Thread worker1 = new Thread() { + SubjectInheritingThread worker1 = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { doSmallAppends(file, fs, 20); } catch (IOException e) { @@ -562,9 +563,9 @@ public void run() { } }; - Thread worker2 = new Thread() { + SubjectInheritingThread worker2 = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { doSmallAppends(file, fs, 20); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index b10439b248bfb..52182f98ddad1 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; import 
org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -165,9 +166,9 @@ public void testRecoverFinalizedBlock() throws Throwable { // write 1/2 block AppendTestUtil.write(stm, 0, 4096); final AtomicReference err = new AtomicReference(); - Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { stm.close(); } catch (Throwable t) { @@ -237,9 +238,9 @@ public void testCompleteOtherLeaseHoldersFile() throws Throwable { // write 1/2 block AppendTestUtil.write(stm, 0, 4096); final AtomicReference err = new AtomicReference(); - Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { stm.close(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java index a8d3c52fc6fef..77126a99d15e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java @@ -38,6 +38,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; @@ -221,7 +222,7 @@ public void testImmediateReadOfNewFile() final AtomicReference errorMessage = new AtomicReference(); final FSDataOutputStream out = 
fileSystem.create(file); - final Thread writer = new Thread(new Runnable() { + final Thread writer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -241,7 +242,7 @@ public void run() { } }); - Thread opener = new Thread(new Runnable() { + Thread opener = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -346,7 +347,7 @@ private void runTestUnfinishedBlockCRCError( final AtomicBoolean writerStarted = new AtomicBoolean(false); final AtomicBoolean error = new AtomicBoolean(false); - final Thread writer = new Thread(new Runnable() { + final Thread writer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -379,7 +380,7 @@ public void run() { } } }); - Thread tailer = new Thread(new Runnable() { + Thread tailer = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java index 41b5b340c8805..6d1d6312515b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -113,7 +114,7 @@ public void testClientTriggeredLeaseRecovery() throws Exception { } } - static class SlowWriter extends Thread { + static class SlowWriter extends SubjectInheritingThread { final FileSystem fs; final Path filepath; boolean running = true; @@ -125,7 +126,7 @@ static class SlowWriter extends Thread { } @Override - 
public void run() { + public void work() { FSDataOutputStream out = null; int i = 0; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java index 48666f68dfdd6..023013c02dd1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java @@ -35,6 +35,7 @@ import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; /** @@ -71,7 +72,7 @@ private void initBuffer(int size) { toWrite = AppendTestUtil.randomBytes(seed, size); } - private class WriterThread extends Thread { + private class WriterThread extends SubjectInheritingThread { private final FSDataOutputStream stm; private final AtomicReference thrown; private final int numWrites; @@ -87,7 +88,7 @@ public WriterThread(FSDataOutputStream stm, } @Override - public void run() { + public void work() { try { countdown.await(); for (int i = 0; i < numWrites && thrown.get() == null; i++) { @@ -162,9 +163,9 @@ public void testHflushWhileClosing() throws Throwable { final AtomicReference thrown = new AtomicReference(); try { for (int i = 0; i < 10; i++) { - Thread flusher = new Thread() { + SubjectInheritingThread flusher = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { while (true) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index 06d8aec4ffcbd..7536d7d4870fb 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -189,7 +190,7 @@ public int pRead(DFSInputStream dis, byte[] target, int startOff, int len) /** * A worker to do one "unit" of read. */ - static class ReadWorker extends Thread { + static class ReadWorker extends SubjectInheritingThread { static public final int N_ITERATIONS = 1024; @@ -215,7 +216,7 @@ static class ReadWorker extends Thread { * Randomly do one of (1) Small read; and (2) Large Pread. */ @Override - public void run() { + public void work() { for (int i = 0; i < N_ITERATIONS; ++i) { int startOff = rand.nextInt((int) fileSize); int len = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java index 8f5ba9018dfa7..9b1b6c50b7e4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.log4j.Level; import org.junit.jupiter.api.Assertions; @@ -159,7 +160,7 @@ public void testInterruptReader() throws Exception { final FSDataInputStream in = fs.open(file); AtomicBoolean readInterrupted = new AtomicBoolean(false); - final Thread reader = new Thread(new 
Runnable() { + final Thread reader = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java index a52eba866c07f..995652ef61c64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -244,7 +245,7 @@ static void sleepSeconds(final int waittime) throws InterruptedException { Thread.sleep(waittime * 1000L); } - static class SlowWriter extends Thread { + static class SlowWriter extends SubjectInheritingThread { private final Path filepath; private final HdfsDataOutputStream out; private final long sleepms; @@ -258,7 +259,7 @@ static class SlowWriter extends Thread { this.sleepms = sleepms; } - @Override public void run() { + @Override public void work() { int i = 0; try { sleep(sleepms); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index e2df609095567..966b585ec449b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -37,6 
+37,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure.Policy; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.slf4j.event.Level; @@ -222,7 +223,7 @@ static void sleepSeconds(final int waittime) throws InterruptedException { Thread.sleep(waittime * 1000L); } - static class SlowWriter extends Thread { + static class SlowWriter extends SubjectInheritingThread { final Path filepath; final HdfsDataOutputStream out; final long sleepms; @@ -237,7 +238,7 @@ static class SlowWriter extends Thread { } @Override - public void run() { + public void work() { int i = 0; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java index 810f7e1864d17..a7392d2acd6ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java @@ -73,6 +73,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -235,7 +236,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(readerRunnable); + threads[i] = new SubjectInheritingThread(readerRunnable); threads[i].start(); } Thread.sleep(500); @@ -334,7 +335,7 @@ public void run() { }; Thread threads[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new 
Thread(readerRunnable); + threads[i] = new SubjectInheritingThread(readerRunnable); threads[i].start(); } gotFailureLatch.await(); @@ -640,7 +641,7 @@ public void run() { } } }; - Thread thread = new Thread(readerRunnable); + Thread thread = new SubjectInheritingThread(readerRunnable); thread.start(); // While the thread is reading, send it interrupts. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java index 2149b924d1244..a8d09ab159801 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java @@ -34,6 +34,7 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -106,7 +107,7 @@ private long addOneDataNode(Configuration conf) throws Exception { } private Thread newBalancerService(Configuration conf, String[] args) { - return new Thread(new Runnable() { + return new SubjectInheritingThread(new Runnable() { @Override public void run() { Tool cli = new Balancer.Cli(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 0ef9c4e3cb55c..c289a611d0cbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -29,6 +29,7 
@@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; @@ -1524,7 +1525,7 @@ public void testAsyncIBR() throws Exception { Thread[] writers = new Thread[numWriters]; for (int i=0; i < writers.length; i++) { final Path p = new Path("/writer"+i); - writers[i] = new Thread(new Runnable() { + writers[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java index 0dfe3ae509752..5e748d5e02161 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java @@ -70,6 +70,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -915,7 +916,7 @@ private Block findBlock(Path path, long size) throws IOException { return ret; } - private class BlockChecker extends Thread { + private class BlockChecker extends SubjectInheritingThread { final Path filePath; public BlockChecker(final Path filePath) { @@ -923,7 +924,7 @@ public BlockChecker(final Path filePath) { } @Override - public void run() { + public void work() { try { startDNandWait(filePath, true); } catch (Exception e) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index da89b85f6de78..9b08088847842 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -87,6 +87,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -337,7 +338,7 @@ public void blockUtilSendFullBlockReport() { }); countBlockReportItems(FAKE_BLOCK, mockNN1, blocks); - addNewBlockThread = new Thread(() -> { + addNewBlockThread = new SubjectInheritingThread(() -> { for (int i = 0; i < totalTestBlocks; i++) { SimulatedFSDataset fsDataset = (SimulatedFSDataset) mockFSDataset; SimulatedStorage simulatedStorage = fsDataset.getStorages().get(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 570d41a69dba4..8f298140908dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -96,6 +96,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import 
org.junit.jupiter.api.BeforeEach; @@ -917,7 +918,7 @@ private void testStopWorker(final TestStopWorkerRunnable tswr) final RecoveringBlock recoveringBlock = Iterators.get(recoveringBlocks.iterator(), 0); final ExtendedBlock block = recoveringBlock.getBlock(); - Thread slowWriterThread = new Thread(new Runnable() { + Thread slowWriterThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -944,7 +945,7 @@ public void run() { progressParent.uninterruptiblyAcquire(60000); // Start a worker thread which will attempt to stop the writer. - Thread stopWriterThread = new Thread(new Runnable() { + Thread stopWriterThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java index 74d18b55c6cdc..a51f3864dbb35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery2.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.TestName; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -248,7 +249,7 @@ public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() final DataNode dataNode = cluster.getDataNodes().get(0); final AtomicBoolean recoveryInitResult = new AtomicBoolean(true); - Thread recoveryThread = new Thread(() -> { + Thread recoveryThread = new SubjectInheritingThread(() -> { try { DatanodeInfo[] locations = block.getLocations(); final 
BlockRecoveryCommand.RecoveringBlock recoveringBlock = @@ -367,7 +368,7 @@ public void testEcRecoverBlocks() throws Throwable { // write 5MB File AppendTestUtil.write(stm, 0, 1024 * 1024 * 5); final AtomicReference err = new AtomicReference<>(); - Thread t = new Thread(() -> { + Thread t = new SubjectInheritingThread(() -> { try { stm.close(); } catch (Throwable t1) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java index babce8d5833cf..40d7e5f86a5d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java @@ -51,6 +51,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -567,7 +568,7 @@ public void testAddVolumesConcurrently() // Thread to list all storage available at DataNode, // when the volumes are being added in parallel. 
- final Thread listStorageThread = new Thread(new Runnable() { + final SubjectInheritingThread listStorageThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while (addVolumeCompletionLatch.getCount() != newVolumeCount) { @@ -591,7 +592,7 @@ public void run() { public Object answer(InvocationOnMock invocationOnMock) throws Throwable { final Random r = new Random(); Thread addVolThread = - new Thread(new Runnable() { + new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -928,7 +929,7 @@ public void logDelaySendingAckToUpstream( final DataNode dataNode = dn; final CyclicBarrier reconfigBarrier = new CyclicBarrier(2); - Thread reconfigThread = new Thread(() -> { + Thread reconfigThread = new SubjectInheritingThread(() -> { try { reconfigBarrier.await(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index f4d66f8c8d001..ef5820bdf9d34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -82,6 +82,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -441,7 +442,7 @@ public void delayWhenOfferServiceHoldLock() { BPServiceActor actor = service.getBPServiceActors().get(0); DatanodeRegistration bpRegistration = actor.getBpRegistration(); - Thread register = new Thread(() -> { + Thread register = new SubjectInheritingThread(() -> { try { 
service.registrationSucceeded(actor, bpRegistration); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java index 81f6020088965..02b444df8b850 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -85,7 +86,7 @@ public void testBaseFunc() { @Test @Timeout(value = 5) public void testAcquireWriteLockError() throws InterruptedException { - Thread t = new Thread(() -> { + Thread t = new SubjectInheritingThread(() -> { manager.readLock(LockLevel.BLOCK_POOl, "test"); manager.writeLock(LockLevel.BLOCK_POOl, "test"); }); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java index c6b5592c3c01d..eea64bef1f859 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataXceiverBackwardsCompat.java @@ -34,6 +34,7 @@ import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -95,7 +96,7 @@ public NullDataNode(Configuration conf, OutputStream out, int port) throws any(StorageType.class), any(String.class), any(ExtendedBlock.class), anyBoolean()); - new Thread(new NullServer(port)).start(); + new SubjectInheritingThread(new NullServer(port)).start(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java index 200c30a5abb0e..3dcff8c54aa17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -381,7 +382,7 @@ public void testConcurrentAddBlockPool() throws InterruptedException, IOException { final String[] bpids = {"BP-TEST1-", "BP-TEST2-"}; final SimulatedFSDataset fsdataset = new SimulatedFSDataset(null, conf); - class AddBlockPoolThread extends Thread { + class AddBlockPoolThread extends SubjectInheritingThread { private int id; private IOException ioe; public AddBlockPoolThread(int id) { @@ -394,7 +395,7 @@ public void test() throws InterruptedException, IOException { throw ioe; } } - public void run() { + public void work() { for (int i=0; i < 10000; i++) { // add different block pools concurrently String newbpid = bpids[id] + i; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java index 883290ef41c4e..30a90fac5cecc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java @@ -84,6 +84,7 @@ import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; @@ -648,9 +649,9 @@ public void testConcurrentWriteAndDeleteBlock() throws Exception { Random random = new Random(); // Random write block and delete half of them. 
for (int i = 0; i < threadCount; i++) { - Thread thread = new Thread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { String bpid = BLOCK_POOL_IDS[random.nextInt(BLOCK_POOL_IDS.length)]; for (int blockId = 0; blockId < numBlocks; blockId++) { @@ -931,8 +932,8 @@ public void testRemoveVolumeBeingWritten() throws Exception { final CountDownLatch blockReportReceivedLatch = new CountDownLatch(1); final CountDownLatch volRemoveStartedLatch = new CountDownLatch(1); final CountDownLatch volRemoveCompletedLatch = new CountDownLatch(1); - class BlockReportThread extends Thread { - public void run() { + class BlockReportThread extends SubjectInheritingThread { + public void work() { // Lets wait for the volume remove process to start try { volRemoveStartedLatch.await(); @@ -946,8 +947,8 @@ public void run() { } } - class ResponderThread extends Thread { - public void run() { + class ResponderThread extends SubjectInheritingThread { + public void work() { try (ReplicaHandler replica = dataset .createRbw(StorageType.DEFAULT, null, eb, false)) { LOG.info("CreateRbw finished"); @@ -973,8 +974,8 @@ public void run() { } } - class VolRemoveThread extends Thread { - public void run() { + class VolRemoveThread extends SubjectInheritingThread { + public void work() { Set volumesToRemove = new HashSet<>(); try { volumesToRemove.add(dataset.getVolume(eb).getStorageLocation()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java index 6c00e9690bb91..eedca0b5a7c42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java @@ -42,6 +42,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -386,9 +387,9 @@ public void testAddRplicaProcessorForAddingReplicaInMap() throws Exception { ExecutorService pool = Executors.newFixedThreadPool(10); List> futureList = new ArrayList<>(); for (int i = 0; i < 100; i++) { - Thread thread = new Thread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { for (int j = 0; j < 10; j++) { try { DFSTestUtil.createFile(fs, new Path("File_" + getName() + j), 10, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index f1f57a9714f02..e4110e436f960 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -196,7 +197,7 @@ public void run() { Thread threads[] = new Thread[NUM_TASKS]; for (int i = 0; i < NUM_TASKS; i++) { - threads[i] = 
new Thread(readerRunnable); + threads[i] = new SubjectInheritingThread(readerRunnable); threads[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java index c29c2bf1bc855..1c715c755c9f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaCachingGetSpaceUsed.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -175,11 +176,11 @@ public void testFsDatasetImplDeepCopyReplica() { modifyThread.setShouldRun(false); } - private class ModifyThread extends Thread { + private class ModifyThread extends SubjectInheritingThread { private boolean shouldRun = true; @Override - public void run() { + public void work() { FSDataOutputStream os = null; while (shouldRun) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java index fd753386a11d7..480149a9a56f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java @@ -48,6 +48,7 @@ import 
org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; @@ -56,7 +57,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.ArrayList; +import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -717,8 +718,8 @@ public Object answer(InvocationOnMock invocation) { getTrimmedStringCollection(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY)); final String newDirs = oldDirs.get(0); LOG.info("Reconfigure newDirs:" + newDirs); - Thread reconfigThread = new Thread() { - public void run() { + SubjectInheritingThread reconfigThread = new SubjectInheritingThread() { + public void work() { try { LOG.info("Waiting for work plan creation!"); createWorkPlanLatch.await(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index ca29433dbd607..c404ed3707b45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -85,6 +85,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; /** @@ -422,7 +423,7 @@ void printStats() { /** * One of the threads that perform stats operations. 
*/ - private class StatsDaemon extends Thread { + private class StatsDaemon extends SubjectInheritingThread { private final int daemonId; private int opsPerThread; private String arg1; // argument passed to executeOp() @@ -438,7 +439,7 @@ private class StatsDaemon extends Thread { } @Override - public void run() { + public void work() { localNumOpsExecuted = 0; localCumulativeTime = 0; arg1 = statsOp.getExecutionArgument(daemonId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index 61c147e6e5ea7..4afaa049927b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -39,6 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -315,7 +316,7 @@ public void testAuditLoggerWithCallContext() throws IOException { .build(); CallerContext.setCurrent(context); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - Thread child = new Thread(new Runnable() + Thread child = new SubjectInheritingThread(new Runnable() { @Override public void run() { @@ -342,7 +343,7 @@ public void run() { .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING)) .build(); LOG.info("Set current caller context as {}", CallerContext.getCurrent()); - child = new Thread(new Runnable() + child = new SubjectInheritingThread(new Runnable() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 73a4a52611b44..62d1feb67c20d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -88,6 +88,7 @@ import org.apache.hadoop.util.ExitUtil.ExitException; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -2611,7 +2612,7 @@ private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary /** * A utility class to perform a checkpoint in a different thread. */ - private static class DoCheckpointThread extends Thread { + private static class DoCheckpointThread extends SubjectInheritingThread { private final SecondaryNameNode snn; private volatile Throwable thrown = null; @@ -2620,7 +2621,7 @@ private static class DoCheckpointThread extends Thread { } @Override - public void run() { + public void work() { try { snn.doCheckpoint(); } catch (Throwable t) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java index d9002f83948f8..102420ed26563 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java @@ -59,6 +59,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.Whitebox; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; 
import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.Mockito; @@ -108,7 +109,7 @@ private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception { "/"), "s1"); } - Thread deleteThread = new DeleteThread(fs, filePath); + SubjectInheritingThread deleteThread = new DeleteThread(fs, filePath); deleteThread.start(); try { @@ -148,7 +149,7 @@ public DatanodeStorageInfo[] chooseTarget(String srcPath, } } - private class DeleteThread extends Thread { + private class DeleteThread extends SubjectInheritingThread { private FileSystem fs; private Path path; @@ -158,7 +159,7 @@ private class DeleteThread extends Thread { } @Override - public void run() { + public void work() { try { Thread.sleep(1000); LOG.info("Deleting" + path); @@ -177,7 +178,7 @@ public void run() { } } - private class RenameThread extends Thread { + private class RenameThread extends SubjectInheritingThread { private FileSystem fs; private Path from; private Path to; @@ -189,7 +190,7 @@ private class RenameThread extends Thread { } @Override - public void run() { + public void work() { try { Thread.sleep(1000); LOG.info("Renaming " + from + " to " + to); @@ -456,14 +457,14 @@ public void testOpenRenameRace() throws Exception { // 6.release writeLock, it's fair lock so open thread gets read lock. // 7.open thread unlocks, rename gets write lock and does rename. // 8.rename thread unlocks, open thread gets write lock and update time. 
- Thread open = new Thread(() -> { + Thread open = new SubjectInheritingThread(() -> { try { openSem.release(); fsn.getBlockLocations("foo", src, 0, 5); } catch (IOException e) { } }); - Thread rename = new Thread(() -> { + Thread rename = new SubjectInheritingThread(() -> { try { openSem.acquire(); renameSem.release(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 84e02d273a89d..d7268e95fc7ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -90,6 +90,7 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.LogManager; import org.apache.log4j.spi.LoggingEvent; @@ -501,7 +502,7 @@ private void testEditLog(int initialSize) throws IOException { for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS, i*NUM_TRANSACTIONS); - threadId[i] = new Thread(trans, "TransactionThread-" + i); + threadId[i] = new SubjectInheritingThread(trans, "TransactionThread-" + i); threadId[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java index 8d3effb511fc2..beda93568e179 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java @@ -67,6 
+67,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.mockito.ArgumentMatcher; import org.slf4j.event.Level; import org.junit.jupiter.api.Test; @@ -205,7 +206,7 @@ private void startTransactionWorkers(MiniDFSCluster cluster, // Create threads and make them run transactions concurrently. for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(cluster, caughtErr); - new Thread(trans, "TransactionThread-" + i).start(); + new SubjectInheritingThread(trans, "TransactionThread-" + i).start(); workers.add(trans); } } @@ -425,9 +426,9 @@ public void testSaveImageWhileSyncInProgress() throws Exception { new AtomicReference(); final CountDownLatch waitToEnterFlush = new CountDownLatch(1); - final Thread doAnEditThread = new Thread() { + final SubjectInheritingThread doAnEditThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { LOG.info("Starting mkdirs"); namesystem.mkdirs("/test", @@ -518,9 +519,9 @@ public void testSaveRightBeforeSync() throws Exception { new AtomicReference(); final CountDownLatch sleepingBeforeSync = new CountDownLatch(1); - final Thread doAnEditThread = new Thread() { + final SubjectInheritingThread doAnEditThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { LOG.info("Starting setOwner"); namesystem.writeLock(RwLockMode.FS); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index 0ffe35ca3e150..261c297fcf646 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -29,6 +29,7 @@ import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -268,9 +269,9 @@ public void testFSReadLockLongHoldingReport() throws Exception { // Track but do not Report if it's held for a long time when re-entering // read lock but time since last report does not exceed the suppress // warning interval - Thread tLong = new Thread() { + SubjectInheritingThread tLong = new SubjectInheritingThread() { @Override - public void run() { + public void work() { fsnLock.readLock(); // Add one lock hold which is the longest, but occurs under a different // stack trace, to ensure this is the one that gets logged @@ -298,7 +299,7 @@ public void run() { fsnLock.readUnlock(); // Assert that stack trace eventually logged is the one for the longest hold String stackTracePatternString = - String.format("INFO.+%s(.+\n){5}\\Q%%s\\E\\.run", readLockLogStmt); + String.format("INFO.+%s(.+\n){5}\\Q%%s\\E\\.work", readLockLogStmt); Pattern tLongPattern = Pattern.compile( String.format(stackTracePatternString, tLong.getClass().getName())); assertTrue(tLongPattern.matcher(logs.getOutput()).find()); @@ -318,9 +319,9 @@ public void run() { logs.clearOutput(); final CountDownLatch barrier = new CountDownLatch(1); final CountDownLatch barrier2 = new CountDownLatch(1); - Thread t1 = new Thread() { + SubjectInheritingThread t1 = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { fsnLock.readLock(); timer.advance(readLockReportingThreshold + 1); @@ -332,9 +333,9 @@ public void run() { } } }; - Thread t2 = new Thread() { + SubjectInheritingThread t2 = new SubjectInheritingThread() { @Override - public void run() { + public void 
work() { try { barrier.await(); // Wait until t1 finishes sleeping fsnLock.readLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java index f18ff3020aa05..68f9ca3b525a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java @@ -41,6 +41,7 @@ import org.apache.hadoop.metrics2.impl.ConfigBuilder; import org.apache.hadoop.metrics2.impl.TestMetricsConfig; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.eclipse.jetty.util.ajax.JSON; @@ -55,10 +56,10 @@ public class TestFSNamesystemMBean { * JMX properties. If it can access all the properties, the test is * considered successful.
*/ - private static class MBeanClient extends Thread { + private static class MBeanClient extends SubjectInheritingThread { private boolean succeeded = false; @Override - public void run() { + public void work() { try { MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java index 6e7ed2e82f40f..4b507035fa662 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java @@ -68,6 +68,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.event.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -255,7 +256,7 @@ public void delay() { DataNodeFaultInjector.set(injector); // Truncate by using different client name. 
- Thread t = new Thread(() -> { + Thread t = new SubjectInheritingThread(() -> { String hdfsCacheDisableKey = "fs.hdfs.impl.disable.cache"; boolean originCacheDisable = conf.getBoolean(hdfsCacheDisableKey, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java index 2dffeaee5bdb9..3a7a882b8bd40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -165,12 +166,12 @@ protected void execute() throws Throwable { * implementation class, the thread is notified: other threads can wait * for it to terminate */ - private abstract class TestThread extends Thread { + private abstract class TestThread extends SubjectInheritingThread { volatile Throwable thrown; protected volatile boolean live = true; @Override - public void run() { + public void work() { try { execute(); } catch (Throwable throwable) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java index 6c6c2b0008d52..5f5bc122ba9a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.ChunkedArrayList; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -222,7 +223,7 @@ public void testListOpenFilesInHA() throws Exception { final AtomicBoolean failoverCompleted = new AtomicBoolean(false); final AtomicBoolean listOpenFilesError = new AtomicBoolean(false); final int listingIntervalMsec = 250; - Thread clientThread = new Thread(new Runnable() { + Thread clientThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while(!failoverCompleted.get()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 4135cb354e707..0260cee687edb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.MethodOrderer; @@ -222,7 +223,7 @@ public void testMetaSaveOverwrite() throws Exception { } } - class MetaSaveThread extends Thread { + class MetaSaveThread extends SubjectInheritingThread { NamenodeProtocols nnRpc; String filename; public MetaSaveThread(NamenodeProtocols nnRpc, String 
filename) { @@ -231,7 +232,7 @@ public MetaSaveThread(NamenodeProtocols nnRpc, String filename) { } @Override - public void run() { + public void work() { try { nnRpc.metaSave(filename); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java index a27e4d8676ecb..6e10497352918 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryptionHandler.java @@ -27,6 +27,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.KMSUtil; import org.apache.hadoop.util.StopWatch; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.test.Whitebox; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -177,8 +178,8 @@ public void testThrottleAccumulatingTasks() throws Exception { zst.addTask(mock); } - Thread removeTaskThread = new Thread() { - public void run() { + SubjectInheritingThread removeTaskThread = new SubjectInheritingThread() { + public void work() { try { Thread.sleep(3000); } catch (InterruptedException ie) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index 368a9a8460010..6b6dcfe9da0ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -38,6 +38,7 @@ import org.apache.hadoop.io.Text; import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.mockito.invocation.InvocationOnMock; @@ -136,7 +137,7 @@ public void testEditLog() throws IOException { Thread threadId[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS); - threadId[i] = new Thread(trans, "TransactionThread-" + i); + threadId[i] = new SubjectInheritingThread(trans, "TransactionThread-" + i); threadId[i].start(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 6fcaffaa6ae91..00e56a779fb9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -63,6 +63,7 @@ import org.junit.jupiter.api.Timeout; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; public class TestBootstrapStandby { private static final Logger LOG = @@ -409,7 +410,7 @@ public void testRateThrottling() throws Exception { final int timeOut = (int)(imageFile.length() / minXferRatePerMS) + 1; // A very low DFS_IMAGE_TRANSFER_RATE_KEY value won't affect bootstrapping final AtomicBoolean bootStrapped = new AtomicBoolean(false); - new Thread( + new SubjectInheritingThread( new Runnable() { @Override public void run() { @@ -439,7 +440,7 @@ public Boolean get() { // A very low DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY value should // cause timeout bootStrapped.set(false); - new 
Thread( + new SubjectInheritingThread( new Runnable() { @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java index 09d718fb012d5..e4c59433780b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java @@ -56,6 +56,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; @@ -152,7 +153,7 @@ public void testMsyncSimple() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { // this read will block until roll and tail edits happen. dfs.getFileStatus(testPath); @@ -202,7 +203,7 @@ private void testMsync(boolean autoMsync, long autoMsyncPeriodMs) dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { // After msync, client should have the latest state ID from active. // Therefore, the subsequent getFileStatus call should succeed. 
@@ -293,7 +294,7 @@ public void testCallFromNewClient() throws Exception { (DistributedFileSystem) FileSystem.get(conf2); dfs2.getClient().getHAServiceState(); - Thread reader = new Thread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { dfs2.getFileStatus(testPath); readStatus.set(1); @@ -334,7 +335,7 @@ public void testUncoordinatedCall() throws Exception { AtomicInteger readStatus = new AtomicInteger(0); // create a separate thread to make a blocking read. - Thread reader = new Thread(() -> { + Thread reader = new SubjectInheritingThread(() -> { try { // this read call will block until server state catches up. But due to // configuration, this will take a very long time. @@ -439,7 +440,7 @@ public void testRpcQueueTimeNumOpsMetrics() throws Exception { dfs.mkdir(testPath, FsPermission.getDefault()); assertSentTo(0); - Thread reader = new Thread(new Runnable() { + Thread reader = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 9006bb47c3d2b..756094e19fa8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -268,9 +269,9 @@ public void testDelegationTokenDuringNNFailover() throws Exception { HAServiceState.STANDBY.toString(), e); } - new Thread() { + 
new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { cluster.transitionToActive(1); } catch (Exception e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index 2a011469d5825..44f20bd8265a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -68,6 +68,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.Whitebox; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -142,9 +143,9 @@ public void testClientRetrySafeMode() throws Exception { .getBlockManager()); assertTrue(nn0.getNamesystem().isInStartupSafeMode()); LOG.info("enter safemode"); - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { boolean mkdir = fs.mkdirs(test); LOG.info("mkdir finished, result is " + mkdir); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index e2857a4d84dce..f679bcc148b63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -90,6 +90,7 @@ import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.RetryCache.CacheEntry; import 
org.apache.hadoop.util.LightWeightCache; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -1330,9 +1331,9 @@ public void testClientRetryWithFailover(final AtMostOnceOp op) // set DummyRetryInvocationHandler#block to true DummyRetryInvocationHandler.block.set(true); - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { op.invoke(); Object result = op.getResult(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 6f36d5e19bcdd..e010193bf3eff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -53,6 +53,7 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ThreadUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.log4j.spi.LoggingEvent; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -559,9 +560,9 @@ public void testReadsAllowedDuringCheckpoint() throws Exception { ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); // Perform an RPC that needs to take the write lock. 
- Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { nns[1].getRpcServer().restoreFailedStorage("false"); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java index 6a62d16d39993..f4d141b3877f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -709,7 +710,7 @@ public void testOpenFileWritingAcrossSnapDeletion() throws Exception { final AtomicBoolean writerError = new AtomicBoolean(false); final CountDownLatch startLatch = new CountDownLatch(1); final CountDownLatch deleteLatch = new CountDownLatch(1); - Thread t = new Thread(new Runnable() { + Thread t = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java index 19fed937ebb53..0b725b93d564c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java @@ -62,6 +62,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -563,11 +564,11 @@ public static void main(String[] args) throws Exception { long start = Time.now(); final int iteration = 20; - Thread[] threads = new Thread[threadCount]; + SubjectInheritingThread[] threads = new SubjectInheritingThread[threadCount]; for (int i = 0; i < threadCount; i++) { - threads[i] = new Thread() { + threads[i] = new SubjectInheritingThread() { @Override - public void run() { + public void work() { for (int i = 0; i < iteration; i++) { try { String user = getCurrentUser(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java index 598d18e9f0bb8..c3296ee7b4a07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.util; import org.apache.hadoop.hdfs.server.namenode.AclFeature; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -85,13 +86,13 @@ public void testRefCountMapConcurrently() throws Exception { assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2)); } - class PutThread extends Thread { + class PutThread extends SubjectInheritingThread { private ReferenceCountMap referenceCountMap; 
PutThread(ReferenceCountMap referenceCountMap) { this.referenceCountMap = referenceCountMap; } @Override - public void run() { + public void work() { for (int i = 0; i < LOOP_COUNTER; i++) { referenceCountMap.put(aclFeature1); referenceCountMap.put(aclFeature2); @@ -99,13 +100,13 @@ public void run() { } }; - class RemoveThread extends Thread { + class RemoveThread extends SubjectInheritingThread { private ReferenceCountMap referenceCountMap; RemoveThread(ReferenceCountMap referenceCountMap) { this.referenceCountMap = referenceCountMap; } @Override - public void run() { + public void work() { for (int i = 0; i < LOOP_COUNTER; i++) { referenceCountMap.remove(aclFeature1); referenceCountMap.remove(aclFeature2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java index 922876c598d0e..48751695e48c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java @@ -57,6 +57,7 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.Whitebox; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.eclipse.jetty.util.ajax.JSON; @@ -296,9 +297,9 @@ public void testRetryWhileNNStartup() throws Exception { final NamenodeProtocols rpcServer = namenode.getRpcServer(); Whitebox.setInternalState(namenode, "rpcServer", null); - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { boolean result = false; FileSystem fs = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java index 5924a8dedcef3..d11dc5ec2be51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java @@ -51,6 +51,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Timeout; import org.opentest4j.TestAbortedException; @@ -325,9 +326,9 @@ public void testTwoStepWriteReadTimeout(TimeoutSource src) throws Exception { private void startSingleTemporaryRedirectResponseThread( final boolean consumeConnectionBacklog) { fs.connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY; - serverThread = new Thread() { + serverThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { Socket clientSocket = null; OutputStream out = null; InputStream in = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index 4c245136efb41..1548bcc3c6bd7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -64,6 +64,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import 
org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -151,7 +152,7 @@ public void serviceStart() throws Exception { HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder(). setDaemon(true).setNameFormat("uber-SubtaskRunner").build()); // create and start an event handling thread - eventHandler = new Thread(new EventHandler(), "uber-EventHandler"); + eventHandler = new SubjectInheritingThread(new EventHandler(), "uber-EventHandler"); // if the job classloader is specified, set it onto the event handler as the // thread context classloader so that it can be used by the event handler // as well as the subtask runner threads diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 6ab06696c19df..16f30c901773a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -65,6 +65,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; @@ -351,7 +352,7 @@ 
protected void serviceStart() throws Exception { } else if (timelineV2Client != null) { timelineV2Client.start(); } - eventHandlingThread = new Thread(new Runnable() { + eventHandlingThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { JobHistoryEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index eb3583b41bc71..703f0b1f58778 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -134,6 +134,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -739,10 +740,10 @@ private class JobFinishEventHandler implements EventHandler { public void handle(JobFinishEvent event) { // Create a new thread to shutdown the AM. We should not do it in-line // to avoid blocking the dispatcher itself. 
- new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { shutDownJob(); } }.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java index 13389d67efb71..194f844bd7118 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java @@ -33,6 +33,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.util.Clock; import org.slf4j.Logger; @@ -125,7 +126,7 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected void serviceStart() throws Exception { - lostTaskCheckerThread = new Thread(new PingChecker()); + lostTaskCheckerThread = new SubjectInheritingThread(new PingChecker()); lostTaskCheckerThread.setName("TaskHeartbeatHandler PingChecker"); lostTaskCheckerThread.start(); super.serviceStart(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java index c72e13e648e15..5e444eb1386f0 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java @@ -47,6 +47,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -126,7 +127,7 @@ protected void serviceStart() throws Exception { ThreadFactory backingTf = new ThreadFactory() { @Override public Thread newThread(Runnable r) { - Thread thread = new Thread(r); + Thread thread = new SubjectInheritingThread(r); thread.setContextClassLoader(jobClassLoader); return thread; } @@ -136,7 +137,7 @@ public Thread newThread(Runnable r) { ThreadFactory tf = tfBuilder.build(); launcherPool = new HadoopThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventHandlingThread = new Thread(new Runnable() { + eventHandlingThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { CommitterEvent event = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index d184d9be64bf8..ee7fbde99393c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -43,6 +43,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -285,9 +286,9 @@ protected void serviceStart() throws Exception { Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventHandlingThread = new Thread() { + eventHandlingThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { ContainerLauncherEvent event = null; Set allNodes = new HashSet(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index b836120a8dcb4..ec5561f0ecf71 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; @@ -300,7 +301,7 @@ public void run() { } protected void startAllocatorThread() { - allocatorThread = new Thread(new AllocatorRunnable()); + allocatorThread = new SubjectInheritingThread(new AllocatorRunnable()); allocatorThread.setName("RMCommunicator Allocator"); allocatorThread.start(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index cf2f90ff1e563..a0f73b0b4ff68 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -62,6 +62,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; @@ -111,7 +112,7 @@ public class RMContainerAllocator extends RMContainerRequestor public static final String RAMPDOWN_DIAGNOSTIC = "Reducer preempted " + "to make room for pending map attempts"; - private Thread eventHandlingThread; + private SubjectInheritingThread eventHandlingThread; private final AtomicBoolean stopped; static { @@ -246,10 +247,10 @@ protected void serviceInit(Configuration conf) throws Exception { @Override protected 
void serviceStart() throws Exception { - this.eventHandlingThread = new Thread() { + this.eventHandlingThread = new SubjectInheritingThread() { @SuppressWarnings("unchecked") @Override - public void run() { + public void work() { ContainerAllocatorEvent event; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java index 800ff1809704e..7eb786c5e50e8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java @@ -45,6 +45,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; @@ -219,7 +220,7 @@ public void run() { } } }; - speculationBackgroundThread = new Thread + speculationBackgroundThread = new SubjectInheritingThread (speculationBackgroundCore, "DefaultSpeculator background processing"); speculationBackgroundThread.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java index 34f4c8c7164cf..3288390e62d2f 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java @@ -39,6 +39,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -137,7 +138,7 @@ public void handle(ContainerAllocatorEvent event) { } @Override protected void serviceStart() throws Exception { - thread = new Thread(new Runnable() { + thread = new SubjectInheritingThread(new Runnable() { @Override @SuppressWarnings("unchecked") public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java index eef1a4a10835f..e0d2a323ee631 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java @@ -48,6 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -237,7 +238,7 @@ protected void unregister() { @Override protected void startAllocatorThread() { - allocatorThread = new Thread(); + allocatorThread = new SubjectInheritingThread(); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java index aae1fd0b673f6..c0e149b134840 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java @@ -72,6 +72,7 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,7 +115,7 @@ public ProtocolSignature getProtocolSignature(String protocol, this, protocol, clientVersion, clientMethodsHash); } - private class Job extends Thread implements TaskUmbilicalProtocol { + private class Job extends SubjectInheritingThread implements TaskUmbilicalProtocol { // The job directory on the system: JobClient places job configurations here. // This is analogous to JobTracker's system directory. 
private Path systemJobDir; @@ -521,7 +522,7 @@ private void runTasks(List runnables, } @Override - public void run() { + public void work() { JobID jobId = profile.getJobID(); JobContext jContext = new JobContextImpl(job, jobId); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java index a40a40ada02d9..4c217a4c7a032 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/CleanupQueue.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; class CleanupQueue { @@ -100,7 +101,7 @@ protected boolean isQueueEmpty() { return (cleanupThread.queue.size() == 0); } - private static class PathCleanupThread extends Thread { + private static class PathCleanupThread extends SubjectInheritingThread { // cleanup queue which deletes files/directories of the paths queued up. 
private LinkedBlockingQueue queue = @@ -120,7 +121,7 @@ void addToQueue(PathDeletionContext[] contexts) { } } - public void run() { + public void work() { if (LOG.isDebugEnabled()) { LOG.debug(getName() + " started."); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index 4f86f912838fa..f41c94bf4b58b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -74,6 +74,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1549,10 +1550,10 @@ public void flush() throws IOException, ClassNotFoundException, public void close() { } - protected class SpillThread extends Thread { + protected class SpillThread extends SubjectInheritingThread { @Override - public void run() { + public void work() { spillLock.lock(); spillThreadRunning = true; try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java index 6861f1b2cd36d..44a4b41ef85a7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java @@ 
-72,6 +72,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -952,7 +953,7 @@ void resetDoneFlag() { } public void startCommunicationThread() { if (pingThread == null) { - pingThread = new Thread(this, "communication thread"); + pingThread = new SubjectInheritingThread(this, "communication thread"); pingThread.setDaemon(true); pingThread.start(); } @@ -963,7 +964,7 @@ public void startDiskLimitCheckerThreadIfNeeded() { MRJobConfig.JOB_SINGLE_DISK_LIMIT_BYTES, MRJobConfig.DEFAULT_JOB_SINGLE_DISK_LIMIT_BYTES) >= 0) { try { - diskLimitCheckThread = new Thread(new DiskLimitCheck(conf), + diskLimitCheckThread = new SubjectInheritingThread(new DiskLimitCheck(conf), "disk limit check thread"); diskLimitCheckThread.setDaemon(true); diskLimitCheckThread.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java index 15b0961e57deb..68ae9e97f19d1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Application.java @@ -57,6 +57,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -280,7 +281,7 @@ public static String createDigest(byte[] password, String data) } 
@VisibleForTesting - public static class PingSocketCleaner extends Thread { + public static class PingSocketCleaner extends SubjectInheritingThread { private final ServerSocket serverSocket; private final int soTimeout; @@ -291,7 +292,7 @@ public static class PingSocketCleaner extends Thread { } @Override - public void run() { + public void work() { LOG.info("PingSocketCleaner started..."); while (!Thread.currentThread().isInterrupted()) { Socket clientSocket = null; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java index 89c594a89b034..fdfe07768742a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/BinaryProtocol.java @@ -42,6 +42,7 @@ import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,7 +96,7 @@ private enum MessageType { START(0), private static class UplinkReaderThread - extends Thread { + extends SubjectInheritingThread { private DataInputStream inStream; private UpwardProtocol handler; @@ -117,7 +118,7 @@ public void closeConnection() throws IOException { inStream.close(); } - public void run() { + public void work() { while (true) { try { if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java index 803ece7480c0d..61521765d6173 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java @@ -40,6 +40,7 @@ import org.apache.hadoop.mapreduce.lib.map.WrappedMapper; import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * The Chain class provides all the common functionality for the @@ -296,7 +297,7 @@ private synchronized boolean setIfUnsetThrowable(Throwable th) { return false; } - private class MapRunner extends Thread { + private class MapRunner extends SubjectInheritingThread { private Mapper mapper; private Mapper.Context chainContext; private RecordReader rr; @@ -313,7 +314,7 @@ public MapRunner(Mapper mapper, } @Override - public void run() { + public void work() { if (getThrowable() != null) { return; } @@ -329,7 +330,7 @@ public void run() { } } - private class ReduceRunner extends Thread { + private class ReduceRunner extends SubjectInheritingThread { private Reducer reducer; private Reducer.Context chainContext; private RecordWriter rw; @@ -344,7 +345,7 @@ private class ReduceRunner extends Thread { } @Override - public void run() { + public void work() { try { reducer.run(chainContext); rw.close(chainContext); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java index 
382ed959f12a0..cb48a0e4a3435 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.lib.map; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -247,7 +248,7 @@ public float getProgress() { } } - private class MapRunner extends Thread { + private class MapRunner extends SubjectInheritingThread { private Mapper mapper; private Context subcontext; private Throwable throwable; @@ -269,7 +270,7 @@ private class MapRunner extends Thread { } @Override - public void run() { + public void work() { try { mapper.run(subcontext); reader.close(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java index 0e28c3b41c02e..1857406744607 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java @@ -23,10 +23,11 @@ import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapreduce.TaskAttemptID; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class EventFetcher extends Thread { +class EventFetcher extends SubjectInheritingThread { private static final long SLEEP_TIME = 1000; private static final int MAX_RETRIES = 10; private static final int RETRY_PERIOD = 5000; @@ -56,7 +57,7 @@ public EventFetcher(TaskAttemptID reduce, } @Override - public void run() { + public void work() { int failures = 0; LOG.info(reduce + " Thread started: " + getName()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index 59ef95bdd462b..da598e807e2fb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -48,6 +48,7 @@ import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,7 +56,7 @@ import org.apache.hadoop.classification.VisibleForTesting; @VisibleForTesting -public class Fetcher extends Thread { +public class Fetcher extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory.getLogger(Fetcher.class); @@ -187,7 +188,7 @@ public Fetcher(JobConf job, TaskAttemptID reduceId, } } - public void run() { + public void work() { try { while (!stopped && !Thread.currentThread().isInterrupted()) { MapHost host = null; diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java index dc563eeab4d0f..9ad5db1a0ba4d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java @@ -71,7 +71,7 @@ public LocalFetcher(JobConf job, TaskAttemptID reduceId, setDaemon(true); } - public void run() { + public void work() { // Create a worklist of task attempts to work over. Set maps = new HashSet(); for (TaskAttemptID map : localMapFiles.keySet()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java index c617569da33e8..9bf3edc1a7470 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java @@ -26,10 +26,11 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -abstract class MergeThread extends Thread { +abstract class MergeThread extends SubjectInheritingThread { private static final Logger LOG = 
LoggerFactory.getLogger(MergeThread.class); @@ -78,7 +79,7 @@ public synchronized void waitForMerge() throws InterruptedException { } } - public void run() { + public void work() { while (true) { List inputs = null; try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java index 173cd093e9f6e..5e0dc0b3ba8c1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java @@ -49,6 +49,7 @@ import org.apache.hadoop.mapreduce.task.reduce.MapHost.State; import org.apache.hadoop.util.Progress; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -554,13 +555,13 @@ public int compareTo(Delayed o) { /** * A thread that takes hosts off of the penalty list when the timer expires. 
*/ - private class Referee extends Thread { + private class Referee extends SubjectInheritingThread { public Referee() { setName("ShufflePenaltyReferee"); setDaemon(true); } - public void run() { + public void work() { try { while (true) { // take the first host that has an expired penalty diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java index 585a21d568231..473dff3b9430f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ProcessTree.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -326,7 +327,7 @@ public static boolean isProcessGroupAlive(String pgrpId) { /** * Helper thread class that kills process-tree with SIGKILL in background */ - static class SigKillThread extends Thread { + static class SigKillThread extends SubjectInheritingThread { private String pid = null; private boolean isProcessGroup = false; @@ -339,7 +340,7 @@ private SigKillThread(String pid, boolean isProcessGroup, long interval) { sleepTimeBeforeSigKill = interval; } - public void run() { + public void work() { sigKillInCurrentThread(pid, isProcessGroup, sleepTimeBeforeSigKill); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java index e624b0304f166..0b79d56de9d37 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestIndexCache.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.mapreduce.MRJobConfig; import org.junit.jupiter.api.BeforeEach; @@ -221,9 +222,9 @@ public void testRemoveMap() throws Exception { // run multiple times for (int i = 0; i < 20; ++i) { - Thread getInfoThread = new Thread() { + Thread getInfoThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { cache.getIndexInformation("bigIndex", partsPerMap, big, user); } catch (Exception e) { @@ -231,9 +232,9 @@ public void run() { } } }; - Thread removeMapThread = new Thread() { + Thread removeMapThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { cache.removeMap("bigIndex"); } }; @@ -266,9 +267,9 @@ public void testCreateRace() throws Exception { // run multiple instances Thread[] getInfoThreads = new Thread[50]; for (int i = 0; i < 50; i++) { - getInfoThreads[i] = new Thread() { + getInfoThreads[i] = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { cache.getIndexInformation("racyIndex", partsPerMap, racy, user); cache.removeMap("racyIndex"); @@ -285,9 +286,9 @@ public void run() { final Thread mainTestThread = Thread.currentThread(); - Thread timeoutThread = new Thread() { + Thread timeoutThread = new SubjectInheritingThread() { @Override - 
public void run() { + public void work() { try { Thread.sleep(15000); mainTestThread.interrupt(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java index 0d24cbc323a86..0de980215c658 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLocatedFileStatusFetcher.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.test.AbstractHadoopTestBase; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -77,9 +78,9 @@ public boolean accept(Path path) { } }, true); - Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { fetcher.getFileStatuses(); } catch (Exception e) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java index ef43beaa5f797..f991756241941 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java @@ -34,6 +34,7 @@ 
import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.checkpoint.TaskCheckpointID; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -252,7 +253,7 @@ public void uncaughtException(Thread th, Throwable ex) { task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); reporter.startDiskLimitCheckerThreadIfNeeded(); - Thread t = new Thread(reporter); + Thread t = new SubjectInheritingThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); t.start(); @@ -273,7 +274,7 @@ public void testTaskProgress() throws Exception { Task task = new DummyTask(); task.setConf(job); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new Thread(reporter); + Thread t = new SubjectInheritingThread(reporter); t.start(); Thread.sleep(2100); task.setTaskDone(); @@ -328,7 +329,7 @@ public void uncaughtException(Thread th, Throwable ex) { Task task = new DummyTask(); task.setConf(conf); DummyTaskReporter reporter = new DummyTaskReporter(task); - Thread t = new Thread(reporter); + Thread t = new SubjectInheritingThread(reporter); t.setUncaughtExceptionHandler(h); reporter.setProgressFlag(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java index db7d6a980edb7..ae6f488a6a6e2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java @@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; @@ -184,9 +185,9 @@ public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout() dfsCluster.getFileSystem().setSafeMode( SafeModeAction.ENTER); assertTrue(dfsCluster.getFileSystem().isInSafeMode()); - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { Thread.sleep(500); dfsCluster.getFileSystem().setSafeMode( @@ -209,9 +210,9 @@ public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout() assertTrue(dfsCluster.getFileSystem().isInSafeMode()); final ControlledClock clock = new ControlledClock(); clock.setTime(1); - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { Thread.sleep(500); clock.setTime(3000); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java index b3b66e560d940..08559399dbd43 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java @@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.service.Service; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.EventHandler; import org.junit.jupiter.api.Test; @@ -254,7 +255,7 @@ protected EventHandler createJobHistoryHandler( @Override protected void serviceStart() { // Don't start any event draining thread. - super.eventHandlingThread = new Thread(); + super.eventHandlingThread = new SubjectInheritingThread(); super.eventHandlingThread.start(); } }; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java index 2a7a9f3c80bef..b18e0c041da86 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java @@ -26,6 +26,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.junit.jupiter.api.AfterAll; @@ -106,7 +107,7 @@ public void 
testTwoThreadsQueryingDifferentJobOfSameUser() * files in one child thread. */ createJhistFile(job1); - webRequest1 = new Thread( + webRequest1 = new SubjectInheritingThread( new Runnable() { @Override public void run() { @@ -136,7 +137,7 @@ public void run() { * will also see the job history files for job1. */ createJhistFile(job2); - webRequest2 = new Thread( + webRequest2 = new SubjectInheritingThread( new Runnable() { @Override public void run() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java index 33a60681a35bf..3c36f13dcc3e6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/FailingMapper.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Fails the Mapper. First attempt throws exception. Rest do System.exit. @@ -33,9 +34,9 @@ public void map(Text key, Text value, // Just create a non-daemon thread which hangs forever. MR AM should not be // hung by this. 
- new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { synchronized (this) { try { wait(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java index 125dad5cbe14d..c9c1cc482564a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java @@ -44,6 +44,7 @@ import org.apache.hadoop.mapred.*; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -178,7 +179,7 @@ enum StatSeries { public String toString() {return statName;} } - private static class FileCreateDaemon extends Thread { + private static class FileCreateDaemon extends SubjectInheritingThread { private static final int NUM_CREATE_THREADS = 10; private static volatile int numFinishedThreads; private static volatile int numRunningThreads; @@ -194,7 +195,7 @@ private static class FileCreateDaemon extends Thread { this.end = end; } - public void run() { + public void work() { try { for(int i=start; i < end; i++) { String name = getFileName(i); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java index 044c77c0853dd..de6668ffdf392 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java @@ -48,6 +48,7 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.TextOutputFormat; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -308,7 +309,7 @@ public void configure(JobConf job) { getArgsFromConfiguration(jobConf); } - private class ProgressThread extends Thread { + private class ProgressThread extends SubjectInheritingThread { boolean keepGoing; // while this is true, thread runs. private Reporter reporter; @@ -318,7 +319,7 @@ public ProgressThread(final Reporter r) { this.keepGoing = true; } - public void run() { + public void work() { while (keepGoing) { if (!ProgressThread.interrupted()) { try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java index 303857bf70e0b..0952f089eda43 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java @@ -39,6 +39,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -200,8 +201,8 @@ 
private void checkJobExitStatus(int status, String jobName) { private void runTest(final JobClient jc, final Configuration conf, final String jobClass, final String[] args, KillTaskThread killTaskThread, KillTrackerThread killTrackerThread) throws Exception { - Thread t = new Thread("Job Test") { - public void run() { + SubjectInheritingThread t = new SubjectInheritingThread("Job Test") { + public void work() { try { Class jobClassObj = conf.getClassByName(jobClass); int status = ToolRunner.run(conf, (Tool)(jobClassObj.newInstance()), @@ -249,7 +250,7 @@ public void run() { t.join(); } - private class KillTrackerThread extends Thread { + private class KillTrackerThread extends SubjectInheritingThread { private volatile boolean killed = false; private JobClient jc; private RunningJob rJob; @@ -281,7 +282,7 @@ public void setRunningJob(RunningJob rJob) { public void kill() { killed = true; } - public void run() { + public void work() { stopStartTrackers(true); if (!onlyMapsProgress) { stopStartTrackers(false); @@ -392,7 +393,7 @@ private String convertTrackerNameToHostName(String trackerName) { } - private class KillTaskThread extends Thread { + private class KillTaskThread extends SubjectInheritingThread { private volatile boolean killed = false; private RunningJob rJob; @@ -416,7 +417,7 @@ public void setRunningJob(RunningJob rJob) { public void kill() { killed = true; } - public void run() { + public void work() { killBasedOnProgress(true); if (!onlyMapsProgress) { killBasedOnProgress(false); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java index 83ea506bcd073..35b3d14552630 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java 
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCollect.java @@ -21,6 +21,7 @@ import org.apache.hadoop.io.*; import org.apache.hadoop.mapred.UtilsForTests.RandomInputFormat; import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import java.io.*; @@ -51,14 +52,14 @@ public void map(Text key, Text val, final OutputCollector out, Reporter reporter) throws IOException { // Class for calling collect in separate threads - class CollectFeeder extends Thread { + class CollectFeeder extends SubjectInheritingThread { int id; // id for the thread public CollectFeeder(int id) { this.id = id; } - public void run() { + public void work() { for (int j = 1; j <= NUM_COLLECTS_PER_THREAD; j++) { try { out.collect(new IntWritable((id * NUM_COLLECTS_PER_THREAD) + j), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java index 7f26bb33e8179..0d55f3f986e80 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestJobControl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobID; import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -110,7 +111,7 @@ public static void doJobControlTest() throws Exception { 
theControl.addJob(job_3); theControl.addJob(job_4); - Thread theController = new Thread(theControl); + Thread theController = new SubjectInheritingThread(theControl); theController.start(); while (!theControl.allFinished()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java index 92a5868a56b9a..f64a5270891f7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/TestLocalJobControl.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -115,7 +116,7 @@ public void testLocalJobControlDataCopy() throws Exception { theControl.addJob(job_3); theControl.addJob(job_4); - Thread theController = new Thread(theControl); + Thread theController = new SubjectInheritingThread(theControl); theController.start(); while (!theControl.allFinished()) { LOG.debug("Jobs in waiting state: " + theControl.getWaitingJobs().size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java index 3c6cbbe9ea5ea..6e7899a95a368 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java @@ -519,8 +519,8 @@ private static class SocketCleaner extends PingSocketCleaner { } @Override - public void run() { - super.run(); + public void work() { + super.work(); } protected void closeSocketInternal(Socket clientSocket) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java index d97ea5c8f7ae9..1bcdd26a1cbc8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -317,8 +318,8 @@ public void testMultiMaps() throws Exception { FileOutputFormat.setOutputPath(job, outputPath); final Thread toInterrupt = Thread.currentThread(); - Thread interrupter = new Thread() { - public void run() { + SubjectInheritingThread interrupter = new SubjectInheritingThread() { + public void work() { try { Thread.sleep(120*1000); // 2m toInterrupt.interrupt(); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java index 1bae2b0fe2c73..431eac646feaf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java @@ -28,6 +28,7 @@ import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.MapReduceTestUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -112,7 +113,7 @@ private JobControl createDependencies(Configuration conf, Job job1) theControl.addJob(cjob2); theControl.addJob(cjob3); theControl.addJob(cjob4); - Thread theController = new Thread(theControl); + Thread theController = new SubjectInheritingThread(theControl); theController.start(); return theControl; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java index 0b423797e5c82..efa402a9efff5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControlWithMocks.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; /** @@ -148,7 +149,7 @@ private ControlledJob createFailedControlledJob(JobControl jobControl, } private void runJobControl(JobControl jobControl) { - Thread controller = new Thread(jobControl); + Thread controller = new SubjectInheritingThread(jobControl); controller.start(); waitTillAllFinished(jobControl); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java index b3533482b525d..77f3322977ece 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/MiniMRYarnCluster.java @@ -45,6 +45,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JarFinder; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.MiniYARNCluster; @@ -266,8 +267,8 @@ public synchronized void serviceStart() throws Exception { } historyServer = new JobHistoryServer(); historyServer.init(getConfig()); - new Thread() { - public void run() { + new SubjectInheritingThread() { + public void 
work() { historyServer.start(); }; }.start(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java index 6a437b123c8ba..5e0d1667c11b0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/StatusReportChecker.java @@ -22,6 +22,7 @@ import org.apache.hadoop.mapred.Task.TaskReporter; import org.apache.hadoop.mapreduce.TaskCounter; import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,7 +86,7 @@ public synchronized void start() { // init counters used by native side, // so they will have correct display name initUsedCounters(); - checker = new Thread(this); + checker = new SubjectInheritingThread(this); checker.setDaemon(true); checker.start(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java index 3ce6936c3d7dc..5b558b391fa04 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java @@ -42,6 +42,7 @@ import org.apache.hadoop.util.IndexedSortable; import org.apache.hadoop.util.QuickSort; 
import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.functional.FutureIO; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY; @@ -145,11 +146,11 @@ public static void writePartitionFile(final JobContext job, for(int i=0; i < samples; ++i) { final int idx = i; samplerReader[i] = - new Thread (threadGroup,"Sampler Reader " + idx) { + new SubjectInheritingThread (threadGroup, "Sampler Reader " + idx) { { setDaemon(true); } - public void run() { + public void work() { long records = 0; try { TaskAttemptContext context = new TaskAttemptContextImpl( diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java index 12a1cd7d8f63e..2665013a5b455 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticsContext; import org.apache.hadoop.fs.statistics.impl.IOStatisticsContextImpl; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.functional.CloseableTaskPoolSubmitter; import org.apache.hadoop.util.functional.TaskPool; @@ -457,7 +458,7 @@ public void testListingThroughTaskPool() throws Throwable { * If constructed with an IOStatisticsContext then * that context is switched to before performing the IO. 
*/ - private class TestWorkerThread extends Thread implements Runnable { + private class TestWorkerThread extends SubjectInheritingThread implements Runnable { private final Path workerThreadPath; private final IOStatisticsContext ioStatisticsContext; @@ -475,7 +476,7 @@ private class TestWorkerThread extends Thread implements Runnable { } @Override - public void run() { + public void work() { // Setting the worker thread's name. Thread.currentThread().setName("worker thread"); S3AFileSystem fs = getFileSystem(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java index 96c59c266a647..6e4d62129c48e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.test.tags.ScaleTest; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; @@ -135,7 +136,7 @@ concurrentRenames, new ThreadFactory() { private AtomicInteger count = new AtomicInteger(0); public Thread newThread(Runnable r) { - return new Thread(r, + return new SubjectInheritingThread(r, "testParallelRename" + count.getAndIncrement()); } }); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java index 7cd1821c3da50..98d28d1ea421a 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java +++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureFileSystemThreadPoolExecutor.java @@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -256,7 +257,7 @@ public AzureFileSystemThreadFactory(String prefix) { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new SubjectInheritingThread(r); // Use current thread name as part in naming thread such that use of // same file system object will have unique names. diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java index 4c8d5fb6a5f71..c19bbe48fc3f6 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.impl.StoreImplementationUtils; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSExceptionMessages; @@ -821,7 +822,7 @@ class UploaderThreadFactory implements ThreadFactory { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new SubjectInheritingThread(r); t.setName(String.format("%s-%d", THREAD_ID_PREFIX, threadSequenceNumber.getAndIncrement())); return t; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java index 989c3ba6d9340..8ab568fdc3bfc 100644 --- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import com.microsoft.azure.storage.AccessCondition; import com.microsoft.azure.storage.StorageException; @@ -105,7 +106,7 @@ public SelfRenewingLease(CloudBlobWrapper blobWrapper, boolean throwIfPresent) } } } - renewer = new Thread(new Renewer()); + renewer = new SubjectInheritingThread(new Renewer()); // A Renewer running should not keep JVM from exiting, so make it a daemon. renewer.setDaemon(true); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java index d3fe4aefeb050..d0a1bd0e7fb63 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java @@ -22,6 +22,7 @@ import java.util.Date; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Internal implementation class to help calculate the current bytes @@ -67,7 +68,7 @@ public BandwidthGaugeUpdater(AzureFileSystemInstrumentation instrumentation, this.windowSizeMs = windowSizeMs; this.instrumentation = instrumentation; if (!manualUpdateTrigger) { - uploadBandwidthUpdater = new Thread(new UploadBandwidthUpdater(), THREAD_NAME); + uploadBandwidthUpdater = new SubjectInheritingThread(new UploadBandwidthUpdater(), THREAD_NAME); uploadBandwidthUpdater.setDaemon(true); uploadBandwidthUpdater.start(); } diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java index 74f5aa4ffb573..7a0e42c46f257 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ListActionTaker.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultEntrySchema; import org.apache.hadoop.fs.azurebfs.contracts.services.ListResultSchema; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.ROOT_PATH; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_LIST_MAX_RESULTS; @@ -151,7 +152,7 @@ public boolean listRecursiveAndTakeAction() Thread producerThread = null; try { ListBlobQueue listBlobQueue = createListBlobQueue(configuration); - producerThread = new Thread(() -> { + producerThread = new SubjectInheritingThread(() -> { try { produceConsumableList(listBlobQueue); } catch (AzureBlobFileSystemException e) { diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java index e476f6d744614..240a618666621 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV1.java @@ -27,6 +27,7 @@ import java.util.concurrent.CountDownLatch; import org.apache.hadoop.fs.azurebfs.utils.TracingContext; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.apache.hadoop.classification.VisibleForTesting; /** @@ -92,7 +93,7 @@ void init() { getFreeList().add(i); } for (int i = 0; i < NUM_THREADS; i++) { - Thread t = new Thread(new ReadBufferWorker(i, this)); + Thread t = new SubjectInheritingThread(new ReadBufferWorker(i, this)); t.setDaemon(true); threads[i] = t; t.setName("ABFS-prefetch-" + i); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java index c7e6e4c3d28d4..7943755b97979 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/ReadBufferManagerV2.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.HUNDRED_D; @@ -978,7 +979,7 @@ private static void setIsConfigured(boolean configured) { @Override public Thread newThread(Runnable r) { - Thread t = new Thread(r, "ReadAheadV2-WorkerThread-" + count++); + Thread t = new SubjectInheritingThread(r, "ReadAheadV2-WorkerThread-" + count++); t.setDaemon(true); return t; } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java index f28a15fd7149f..c008d64386bf8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestAzureConcurrentOutOfBandIo.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.azure.integration.AzureTestUtils; 
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Handle OOB IO into a shared container. @@ -74,7 +75,7 @@ public DataBlockWriter(AzureBlobStorageTestAccount testAccount, String key) { * Start writing blocks to Azure storage. */ public void startWriting() { - runner = new Thread(this); // Create the block writer thread. + runner = new SubjectInheritingThread(this); // Create the block writer thread. runner.start(); // Start the block writer thread. } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java index ab175ba6c5c15..19080d031b6c9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.fs.azure.ExceptionHandlingTestHelper.*; @@ -94,7 +95,7 @@ public void testMultiThreadedBlockBlobReadScenario() throws Throwable { Path testFilePath1 = new Path(base, "test1.dat"); Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -121,7 +122,7 @@ public void testMultiThreadBlockBlobSeekScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); 
getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -142,7 +143,7 @@ public void testMultiThreadedPageBlobSetPermissionScenario() createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setPermission(testPath, @@ -161,7 +162,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setPermission(testPath, @@ -179,7 +180,7 @@ public void testMultiThreadedBlockBlobSetPermissionScenario() public void testMultiThreadedPageBlobOpenScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { inputStream = fs.open(testPath); @@ -200,7 +201,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable { createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { @@ -219,7 +220,7 @@ public void testMultiThreadedBlockBlobOpenScenario() throws Throwable { public void testMultiThreadedBlockBlobSetOwnerScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createEmptyFile(createTestAccount(), testPath); - Thread t = new Thread(new 
DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setOwner(testPath, "testowner", "testgroup"); @@ -237,7 +238,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable { createEmptyFile( getPageBlobTestStorageAccount(), testPath); - Thread t = new Thread(new DeleteThread(fs, testPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testPath)); t.start(); while (t.isAlive()) { fs.setOwner(testPath, "testowner", "testgroup"); @@ -253,7 +254,7 @@ public void testMultiThreadedPageBlobSetOwnerScenario() throws Throwable { public void testMultiThreadedBlockBlobListStatusScenario() throws Throwable { assertThrows(FileNotFoundException.class, () -> { createTestFolder(createTestAccount(), testFolderPath); - Thread t = new Thread(new DeleteThread(fs, testFolderPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testFolderPath)); t.start(); while (t.isAlive()) { fs.listStatus(testFolderPath); @@ -271,7 +272,7 @@ public void testMultiThreadedPageBlobListStatusScenario() throws Throwable { createTestFolder( getPageBlobTestStorageAccount(), testFolderPath); - Thread t = new Thread(new DeleteThread(fs, testFolderPath)); + Thread t = new SubjectInheritingThread(new DeleteThread(fs, testFolderPath)); t.start(); while (t.isAlive()) { fs.listStatus(testFolderPath); @@ -293,7 +294,7 @@ public void testMultiThreadedPageBlobReadScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); @@ -318,7 +319,7 @@ public void testMultiThreadedPageBlobSeekScenario() throws Throwable { Path renamePath = new Path(base, "test2.dat"); getInputStreamToTest(fs, testFilePath1); - Thread renameThread = new Thread( + Thread renameThread = new 
SubjectInheritingThread( new RenameThread(fs, testFilePath1, renamePath)); renameThread.start(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java index 918866a73e5d7..fd2ba05a35ac4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemLive.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; @@ -67,7 +68,7 @@ public void testMultipleRenameFileOperationsToSameDestination() for (int i = 0; i < 10; i++) { final int threadNumber = i; Path src = path("test" + threadNumber); - threads.add(new Thread(() -> { + threads.add(new SubjectInheritingThread(() -> { try { latch.await(Long.MAX_VALUE, TimeUnit.SECONDS); } catch (InterruptedException e) { @@ -155,9 +156,9 @@ public void testDeleteThrowsExceptionWithLeaseExistsErrorMessage() // Acquire the lease on the file in a background thread final CountDownLatch leaseAttemptComplete = new CountDownLatch(1); final CountDownLatch beginningDeleteAttempt = new CountDownLatch(1); - Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { // Acquire the lease and then signal the main test thread. 
SelfRenewingLease lease = null; try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java index ecf6e17b82aa7..7730d5283d7c1 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending; @@ -1643,9 +1644,9 @@ public void testLeaseAsDistributedLock() throws IllegalArgumentException, NativeAzureFileSystem nfs = (NativeAzureFileSystem) fs; String fullKey = nfs.pathToKey(nfs.makeAbsolute(new Path(LEASE_LOCK_FILE_KEY))); - Thread first = new Thread(new LeaseLockAction("first-thread", fullKey)); + Thread first = new SubjectInheritingThread(new LeaseLockAction("first-thread", fullKey)); first.start(); - Thread second = new Thread(new LeaseLockAction("second-thread", fullKey)); + Thread second = new SubjectInheritingThread(new LeaseLockAction("second-thread", fullKey)); second.start(); try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java index f801f5e9ddae9..a5bd553839786 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemConcurrency.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -149,7 +150,7 @@ public void testMultiThreadedOperation() throws Exception { final ConcurrentLinkedQueue exceptionsEncountered = new ConcurrentLinkedQueue(); for (int i = 0; i < numThreads; i++) { final Path threadLocalFile = new Path("/myFile" + i); - threads[i] = new Thread(new Runnable() { + threads[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java index 187aa02cceb93..2ecc2592b47be 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/metrics/TestBandwidthGaugeUpdater.java @@ -24,6 +24,7 @@ import java.util.Date; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; public class TestBandwidthGaugeUpdater { @@ -56,7 +57,7 @@ public void testMultiThreaded() throws Exception { new BandwidthGaugeUpdater(instrumentation, 1000, true); Thread[] threads = new Thread[10]; for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(new Runnable() { + threads[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { updater.blockDownloaded(new Date(), new Date(), 10); diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index 0f7e6d9009b8a..557333ff0fd9a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -63,6 +63,7 @@ import org.apache.hadoop.fs.store.BlockUploadStatistics; import org.apache.hadoop.fs.store.DataBlocks; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; import static java.net.HttpURLConnection.HTTP_OK; @@ -1243,7 +1244,7 @@ public void testFlushSuccessWithConnectionResetOnResponseInvalidMd5() throws Exc out1.write(bytes1); //parallel flush call should lead to the first call failing because of md5 mismatch. 
- Thread parallelFlushThread = new Thread(() -> { + Thread parallelFlushThread = new SubjectInheritingThread(() -> { try { out1.hsync(); } catch (IOException e) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index 73a826c601a4e..1251880cc2b95 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -70,6 +70,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticAssertions; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.functional.FunctionRaisingIOE; import static java.net.HttpURLConnection.HTTP_CLIENT_TIMEOUT; @@ -1014,7 +1015,7 @@ public void testParallelRenameForAtomicRenameShouldFail() throws Exception { .acquireLease(Mockito.anyString(), Mockito.anyInt(), Mockito.nullable(String.class), Mockito.any(TracingContext.class)); - new Thread(() -> { + new SubjectInheritingThread(() -> { while (!leaseAcquired.get()) {} try { fs.rename(src, dst); @@ -1064,7 +1065,7 @@ public void testAppendAtomicBlobDuringRename() throws Exception { return answer.callRealMethod(); }).when(client).copyBlob(Mockito.any(Path.class), Mockito.any(Path.class), Mockito.nullable(String.class), Mockito.any(TracingContext.class)); - new Thread(() -> { + new SubjectInheritingThread(() -> { while (!copyInProgress.get()) {} try { os.write(1); diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java index 
6b6596c38d821..e80ad8aca0545 100644 --- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java +++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/common/HdfsCompatShellScope.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -367,7 +368,7 @@ private List readLines(File file) throws IOException { return lines; } - private static final class StreamPrinter extends Thread { + private static final class StreamPrinter extends SubjectInheritingThread { private final InputStream in; private final List lines; @@ -377,7 +378,7 @@ private StreamPrinter(InputStream in) { } @Override - public void run() { + public void work() { try (BufferedReader br = new BufferedReader( new InputStreamReader(in, StandardCharsets.UTF_8))) { String line = br.readLine(); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java index 9c461cb18fb8a..b77bf5afb97ca 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestProducerConsumer.java @@ -23,6 +23,7 @@ import org.apache.hadoop.tools.util.WorkReport; import org.apache.hadoop.tools.util.WorkRequest; import org.apache.hadoop.tools.util.WorkRequestProcessor; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -146,8 +147,8 @@ public void testMultipleProducerConsumerShutdown() // starts two thread: a source thread which put in work, and a sink thread // which takes a piece of 
work from ProducerConsumer - class SourceThread extends Thread { - public void run() { + class SourceThread extends SubjectInheritingThread { + public void work() { while (true) { try { worker.put(new WorkRequest(42)); @@ -161,8 +162,8 @@ public void run() { // The source thread put requests into producer-consumer. SourceThread source = new SourceThread(); source.start(); - class SinkThread extends Thread { - public void run() { + class SinkThread extends SubjectInheritingThread { + public void work() { try { while (true) { WorkReport report = worker.take(); diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java index e44f811f0db41..f06195fccfbd7 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/ApplicationMaster.java @@ -19,6 +19,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import java.io.IOException; import java.nio.ByteBuffer; @@ -549,7 +550,7 @@ public void onContainersAllocated(List allocatedContainers) { + container.getNodeHttpAddress() + ", containerResourceMemory=" + rsrc.getMemorySize() + ", containerResourceVirtualCores=" + rsrc.getVirtualCores()); - Thread launchThread = new Thread(containerLauncher); + Thread launchThread = new SubjectInheritingThread(containerLauncher); // launch and start the container on a separate thread to keep // the main thread unblocked diff --git 
a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java index 0c57542747e41..f6b2368dd2b76 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/Client.java @@ -77,6 +77,7 @@ import org.apache.hadoop.util.ClassUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; @@ -891,7 +892,7 @@ private boolean monitorInfraApplication() throws YarnException, IOException { boolean loggedApplicationInfo = false; boolean success = false; - Thread namenodeMonitoringThread = new Thread(() -> { + Thread namenodeMonitoringThread = new SubjectInheritingThread(() -> { Supplier exitCritera = () -> Apps.isApplicationFinalState(infraAppState); Optional namenodeProperties = Optional.empty(); diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java index f6c8a6ac4d58b..f3779a33c8c1c 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/main/java/org/apache/hadoop/tools/dynamometer/DynoInfraUtils.java @@ -52,6 +52,7 @@ import org.apache.hadoop.net.NetUtils; import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; @@ -319,7 +320,7 @@ static void waitForNameNodeReadiness(final Properties nameNodeProperties, .get(getNameNodeHdfsUri(nameNodeProperties), conf); log.info("Launching thread to trigger block reports for Datanodes with <" + blockThreshold + " blocks reported"); - Thread blockReportThread = new Thread(() -> { + Thread blockReportThread = new SubjectInheritingThread(() -> { // Here we count both Missing and UnderReplicated within under // replicated long lastUnderRepBlocks = Long.MAX_VALUE; diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java index db34037da7806..7326edb7f7d2e 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-infra/src/test/java/org/apache/hadoop/tools/dynamometer/TestDynamometerInfra.java @@ -57,6 +57,7 @@ import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -461,7 +462,7 @@ private Client createAndStartClient(Configuration localConf) { final Client client = new Client(JarFinder.getJar(ApplicationMaster.class), JarFinder.getJar(Assertions.class)); client.setConf(localConf); - Thread appThread = 
new Thread(() -> { + Thread appThread = new SubjectInheritingThread(() -> { try { client.run(new String[] {"-" + Client.MASTER_MEMORY_MB_ARG, "128", "-" + Client.CONF_PATH_ARG, confZip.toString(), diff --git a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java index 14e8c9cb82f16..fe8281e5da7b5 100644 --- a/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java +++ b/hadoop-tools/hadoop-dynamometer/hadoop-dynamometer-workload/src/main/java/org/apache/hadoop/tools/dynamometer/workloadgenerator/audit/AuditReplayThread.java @@ -43,6 +43,7 @@ import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.REPLAYCOUNTERS; import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditReplayMapper.ReplayCommand; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +58,7 @@ * are inserted by the {@link AuditReplayMapper}. Once an item is ready, this * thread will fetch the command from the queue and attempt to replay it. 
*/ -public class AuditReplayThread extends Thread { +public class AuditReplayThread extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory.getLogger(AuditReplayThread.class); @@ -154,7 +155,7 @@ Exception getException() { } @Override - public void run() { + public void work() { long currentEpoch = System.currentTimeMillis(); long delay = startTimestampMs - currentEpoch; try { diff --git a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java index 4d4e9a26b3de4..6eabff12d006a 100644 --- a/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java +++ b/hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedureScheduler.java @@ -36,6 +36,7 @@ import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -309,9 +310,9 @@ public void setJournal(BalanceJournal journal) { /** * This thread consumes the delayQueue and move the jobs to the runningQueue. */ - class Rooster extends Thread { + class Rooster extends SubjectInheritingThread { @Override - public void run() { + public void work() { while (running.get()) { try { DelayWrapper dJob = delayQueue.take(); @@ -327,9 +328,9 @@ public void run() { /** * This thread consumes the runningQueue and give the job to the workers. 
*/ - class Reader extends Thread { + class Reader extends SubjectInheritingThread { @Override - public void run() { + public void work() { while (running.get()) { try { final BalanceJob job = runningQueue.poll(500, TimeUnit.MILLISECONDS); @@ -361,9 +362,9 @@ public void run() { * This thread consumes the recoverQueue, recovers the job the adds it to the * runningQueue. */ - class Recover extends Thread { + class Recover extends SubjectInheritingThread { @Override - public void run() { + public void work() { while (running.get()) { BalanceJob job = null; try { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java index e476223cf1e23..82fed9d568b6d 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Gridmix.java @@ -43,6 +43,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.tools.rumen.JobStoryProducer; import org.apache.hadoop.tools.rumen.ZombieJobProducer; import org.slf4j.Logger; @@ -627,7 +628,7 @@ private int setupDistCacheEmulation(Configuration conf, String traceIn, * pipeline abort its progress, waiting for each to exit and killing * any jobs still running on the cluster. 
*/ - class Shutdown extends Thread { + class Shutdown extends SubjectInheritingThread { static final long FAC_SLEEP = 1000; static final long SUB_SLEEP = 4000; @@ -647,7 +648,7 @@ private void killComponent(Component component, long maxwait) { } @Override - public void run() { + public void work() { LOG.info("Exiting..."); try { killComponent(factory, FAC_SLEEP); // read no more tasks diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java index 0b06911be0857..cb3192dca5bca 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobMonitor.java @@ -35,6 +35,7 @@ import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.JobStatus; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Component accepting submitted, running {@link Statistics.JobStats} and @@ -133,14 +134,14 @@ List getRemainingJobs() { * Monitoring thread pulling running jobs from the component and into * a queue to be polled for status. 
*/ - private class MonitorThread extends Thread { + private class MonitorThread extends SubjectInheritingThread { public MonitorThread(int i) { super("GridmixJobMonitor-" + i); } @Override - public void run() { + public void work() { boolean graceful; boolean shutdown; while (true) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java index d1229ce2d8ff4..bc98116e5b8b9 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/LoadJob.java @@ -41,6 +41,7 @@ import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.TaskInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import java.io.IOException; @@ -143,7 +144,7 @@ private void configure() { * This is a progress based resource usage matcher. 
*/ @SuppressWarnings("unchecked") - static class ResourceUsageMatcherRunner extends Thread + static class ResourceUsageMatcherRunner extends SubjectInheritingThread implements Progressive { private final ResourceUsageMatcher matcher; private final BoostingProgress progress; @@ -199,7 +200,7 @@ protected void match() throws IOException, InterruptedException { } @Override - public void run() { + public void work() { LOG.info("Resource usage matcher thread started."); try { while (progress.getProgress() < 1) { @@ -234,7 +235,7 @@ void boost(float value) { // Makes sure that the TaskTracker doesn't kill the map/reduce tasks while // they are emulating - private static class StatusReporter extends Thread { + private static class StatusReporter extends SubjectInheritingThread { private final TaskAttemptContext context; private final Progressive progress; @@ -244,7 +245,7 @@ private static class StatusReporter extends Thread { } @Override - public void run() { + public void work() { LOG.info("Status reporter thread started."); try { while (!isInterrupted() && progress.getProgress() < 1) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java index fe3b5d36d9841..60572a515fb77 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ReplayJobFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,14 +65,14 @@ public Thread createReaderThread() { public void update(Statistics.ClusterStats item) 
{ } - private class ReplayReaderThread extends Thread { + private class ReplayReaderThread extends SubjectInheritingThread { public ReplayReaderThread(String threadName) { super(threadName); } - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java index cb05ab63f1c07..5f61d7e32ab4f 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/SerialJobFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.mapred.gridmix.Statistics.JobStats; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; @@ -59,7 +60,7 @@ public Thread createReaderThread() { return new SerialReaderThread("SerialJobFactory"); } - private class SerialReaderThread extends Thread { + private class SerialReaderThread extends SubjectInheritingThread { public SerialReaderThread(String threadName) { super(threadName); @@ -78,7 +79,7 @@ public SerialReaderThread(String threadName) { * == */ @Override - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java index bf73f2a1faa55..9178fdb85cb51 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java +++ 
b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/Statistics.java @@ -28,6 +28,7 @@ import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -216,13 +217,13 @@ public void start() { statistics.start(); } - private class StatCollector extends Thread { + private class StatCollector extends SubjectInheritingThread { StatCollector() { super("StatsCollectorThread"); } - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java index 4e7fc9c2bbd80..6e9c04a47b522 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/StressJobFactory.java @@ -30,6 +30,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStoryProducer; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import java.io.IOException; import java.util.HashSet; @@ -136,7 +137,7 @@ public Thread createReaderThread() { * Worker thread responsible for reading descriptions, assigning sequence * numbers, and normalizing time. */ - private class StressReaderThread extends Thread { + private class StressReaderThread extends SubjectInheritingThread { public StressReaderThread(String name) { super(name); @@ -152,7 +153,7 @@ public StressReaderThread(String name) { * load the JT. 
* That is submit (Sigma(no of maps/Job)) > (2 * no of slots available) */ - public void run() { + public void work() { try { startFlag.await(); if (Thread.currentThread().isInterrupted()) { diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java index 23e1413fcad87..4d2646fb84c71 100644 --- a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java +++ b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/service/ShutdownHook.java @@ -20,13 +20,14 @@ package org.apache.hadoop.resourceestimator.service; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Simple shutdown hook for {@link ResourceEstimatorServer}. */ -public class ShutdownHook extends Thread { +public class ShutdownHook extends SubjectInheritingThread { private static final Logger LOGGER = LoggerFactory.getLogger(ShutdownHook.class); private final ResourceEstimatorServer server; @@ -35,7 +36,7 @@ public class ShutdownHook extends Thread { this.server = server; } - public void run() { + public void work() { try { server.shutdown(); } catch (Exception e) { diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java index 3d6541565cb44..d3a2287fb1cb9 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapRed.java @@ -38,6 +38,7 @@ import org.apache.hadoop.streaming.io.TextOutputReader; import org.apache.hadoop.util.LineReader; import org.apache.hadoop.util.ReflectionUtils; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.io.Text; @@ -366,7 +367,7 @@ OutputReader createOutputReader(Class outputReaderClass) } - class MROutputThread extends Thread { + class MROutputThread extends SubjectInheritingThread { MROutputThread(OutputReader outReader, OutputCollector outCollector, Reporter reporter) { @@ -376,7 +377,7 @@ class MROutputThread extends Thread { this.reporter = reporter; } - public void run() { + public void work() { try { // 3/4 Tool to Hadoop while (outReader.readKeyValue()) { @@ -418,7 +419,7 @@ public void run() { } - class MRErrorThread extends Thread { + class MRErrorThread extends SubjectInheritingThread { public MRErrorThread() { this.reporterPrefix = job_.get("stream.stderr.reporter.prefix", "reporter:"); @@ -431,7 +432,7 @@ public void setReporter(Reporter reporter) { this.reporter = reporter; } - public void run() { + public void work() { Text line = new Text(); LineReader lineReader = null; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index d6ec1e4d4c516..dad3920951418 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -69,6 +69,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Shell; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; @@ -1761,7 +1762,7 @@ Thread createLaunchContainerThread(Container allocatedContainer, LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable(allocatedContainer, containerListener, shellId); - return new Thread(runnableLaunchContainer); + return new SubjectInheritingThread(runnableLaunchContainer); } private void publishContainerStartEventOnTimelineServiceV2( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java index 607a4c90d7e93..34d4273a089b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/DistributedShellBaseTest.java @@ -53,6 +53,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -333,7 +334,7 @@ protected void baseTestDSShell(String methodName, boolean haveDomain, boolean de 
assertTrue(initSuccess); LOG.info("Running DS Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread t = new Thread(() -> { + Thread t = new SubjectInheritingThread(() -> { try { result.set(dsClient.run()); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java index 81420465afb90..62defa28bbfc4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; @@ -69,7 +70,7 @@ protected Thread createLaunchContainerThread(Container allocatedContainer, threadsLaunched++; launchedContainers.add(allocatedContainer.getId()); yarnShellIds.add(shellId); - return new Thread(); + return new SubjectInheritingThread(); } void setNumTotalContainers(int numTotalContainers) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java index 6ef26ed1cce72..f75808694b819 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSTimelineV20.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -126,7 +127,7 @@ public void testDSShellWithEnforceExecutionType(TestInfo testInfo) throws Except try { setAndGetDSClient(new Configuration(getYarnClusterConfiguration())); getDSClient().init(args); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new SubjectInheritingThread(() -> { try { getDSClient().run(); } catch (Exception e) { @@ -220,7 +221,7 @@ private void doTestDistributedShellWithResources( assertTrue(getDSClient().init(args)); LOG.info("Running DS Client"); final AtomicBoolean result = new AtomicBoolean(false); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new SubjectInheritingThread(() -> { try { result.set(getDSClient().run()); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java index 7ebc747ed2ea1..f38b504420807 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSWithMultipleNodeManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.client.api.YarnClient; @@ -320,7 +321,7 @@ public void testDistributedShellWithAllocationTagNamespace( new Client( new Configuration(distShellTest.getYarnClusterConfiguration())); dsClient.init(argsA); - Thread dsClientRunner = new Thread(() -> { + Thread dsClientRunner = new SubjectInheritingThread(() -> { try { dsClient.run(); } catch (Exception e) { @@ -455,7 +456,7 @@ private void waitForExpectedNMsCount(int[] expectedNMCounts, /** * Monitor containers running on NMs. 
*/ - class NMContainerMonitor extends Thread { + class NMContainerMonitor extends SubjectInheritingThread { // The interval of milliseconds of sampling (500ms) private final static int SAMPLING_INTERVAL_MS = 500; @@ -465,7 +466,7 @@ class NMContainerMonitor extends Thread { private volatile boolean isRunning = true; @Override - public void run() { + public void work() { while (isRunning) { for (int i = 0; i < NUM_NMS; i++) { int nContainers = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java index 37b3477c8c187..7e7af2ccd1273 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -242,9 +243,9 @@ public void launchAM(ApplicationAttemptId attemptId) // read error and input streams as this would free up the buffers // free the error stream buffer - Thread errThread = new Thread() { + Thread errThread = new SubjectInheritingThread() { @Override - public 
void run() { + public void work() { try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { @@ -256,9 +257,9 @@ public void run() { } } }; - Thread outThread = new Thread() { + Thread outThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { String line = inReader.readLine(); while((line != null) && !isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java index f971d7140aa44..fcb821736023e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.service.SystemServiceManager; @@ -127,7 +128,7 @@ protected void serviceStart() throws Exception { launchUserService(syncUserServices); // Create a thread and submit services in background otherwise it // block RM switch time. 
- serviceLaucher = new Thread(createRunnable()); + serviceLaucher = new SubjectInheritingThread(createRunnable()); serviceLaucher.setName("System service launcher"); serviceLaucher.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java index 2a9bf8d5d975f..168d24fb36b0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -26,6 +26,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -154,9 +155,9 @@ public StopResponseProto stop(StopRequestProto requestProto) // Stop the service in 2 seconds delay to make sure this rpc call is completed. // shutdown hook will be executed which will stop AM gracefully. 
- Thread thread = new Thread() { + Thread thread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { Thread.sleep(2000); ExitUtil.terminate(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java index 0c0a53fecd4bd..27440428838d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/utils/TestServiceApiUtil.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.service.ServiceTestUtils; import org.apache.hadoop.yarn.service.api.records.Artifact; @@ -725,9 +726,9 @@ public void testNoServiceDependencies() { @Test public void testServiceDependencies() { - Thread thread = new Thread() { + SubjectInheritingThread thread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { Service service = createExampleApplication(); Component compa = createComponent("compa"); Component compb = createComponent("compb"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java index 5656484fca126..34097ab545b01 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/ContainerShellWebSocket.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.eclipse.jetty.websocket.api.Session; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose; import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect; @@ -85,7 +86,7 @@ public void onClose(Session session, int status, String reason) { public void run() { try { Reader consoleReader = new Reader(); - Thread inputThread = new Thread(consoleReader, "consoleReader"); + Thread inputThread = new SubjectInheritingThread(consoleReader, "consoleReader"); inputThread.start(); while (mySession.isOpen()) { mySession.getRemote().flush(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java index 376c9dc1b05c1..7a45603634542 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.Container; @@ -293,12 +294,12 @@ public void updateTrackingUrl(String trackingUrl) { client.updateTrackingUrl(trackingUrl); } - private class HeartbeatThread extends Thread { + private class HeartbeatThread extends SubjectInheritingThread { public HeartbeatThread() { super("AMRM Heartbeater thread"); } - public void run() { + public void work() { while (true) { Object response = null; // synchronization ensures we don't send heartbeats after unregistering @@ -337,12 +338,12 @@ public void run() { } } - private class CallbackHandlerThread extends Thread { + private class CallbackHandlerThread extends SubjectInheritingThread { public CallbackHandlerThread() { super("AMRM Callback Handler Thread"); } - public void run() { + public void work() { while (true) { if (!keepRunning) { return; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java index 4a4c50607dab7..91650f84e93df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java @@ -59,6 +59,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,7 
+74,7 @@ public class NMClientAsyncImpl extends NMClientAsync { protected ThreadPoolExecutor threadPool; protected int maxThreadPoolSize; - protected Thread eventDispatcherThread; + protected SubjectInheritingThread eventDispatcherThread; protected AtomicBoolean stopped = new AtomicBoolean(false); protected BlockingQueue events = new LinkedBlockingQueue(); @@ -151,9 +152,9 @@ protected void serviceStart() throws Exception { threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue(), tf); - eventDispatcherThread = new Thread() { + eventDispatcherThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { ContainerEvent event = null; Set allNodes = new HashSet(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index c16fe03b82a43..7ba4dfaa2cfd9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -63,6 +63,7 @@ import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.Priority; @@ -367,9 +368,9 @@ private static class QueueMetrics { long pendingContainers; } - private class KeyboardMonitor extends Thread { + private class KeyboardMonitor extends SubjectInheritingThread { - public void run() { + public void work() { Scanner keyboard = new Scanner(System.in, "UTF-8"); while (runKeyboardMonitor.get()) { 
String in = keyboard.next(); @@ -1229,7 +1230,7 @@ private String getCommandOutput(String[] command) throws IOException, private void addShutdownHook() { //clear screen when the program exits - Runtime.getRuntime().addShutdownHook(new Thread(() -> { + Runtime.getRuntime().addShutdownHook(new SubjectInheritingThread(() -> { clearScreen(); })); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index eff68990ea8a8..4335b39f86f03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.concurrent.TimeoutException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; @@ -244,8 +245,8 @@ protected void verifyClientConnection() throws InterruptedException { } protected Thread createAndStartFailoverThread() { - Thread failoverThread = new Thread() { - public void run() { + SubjectInheritingThread failoverThread = new SubjectInheritingThread() { + public void work() { keepRunning = true; while (keepRunning) { if (cluster.getStartFailoverFlag()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java index 165569df4c736..b7ab17485d8aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestFederationRMFailoverProxyProvider.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; @@ -160,7 +161,7 @@ private void testProxyProvider(boolean facadeFlushCache) throws Exception { .getSubClusters(any(GetSubClustersInfoRequest.class)); threadResponse = null; - Thread thread = new Thread(new Runnable() { + Thread thread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java index 38f220dea8cbe..01540c5843217 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java @@ -29,6 +29,7 @@ import org.apache.hadoop.service.ServiceStateChangeListener; import org.apache.hadoop.tools.GetGroupsTestBase; import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; 
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.junit.jupiter.api.AfterAll; @@ -70,8 +71,8 @@ public void stateChanged(Service service) { resourceManager.registerServiceListener(rmStateChangeListener); resourceManager.init(conf); - new Thread() { - public void run() { + new SubjectInheritingThread() { + public void work() { resourceManager.start(); }; }.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java index f0aedf622ec76..72cd5104ef323 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; @@ -107,8 +108,8 @@ private void validateActiveRM(YarnClient client) throws IOException { } private void makeRMActive(final MiniYARNCluster cluster, final int index) { - Thread t = new Thread() { - @Override public void run() { + SubjectInheritingThread t = new SubjectInheritingThread() { + @Override public void work() { try { System.out.println("Transition rm" + index + " to active"); cluster.getResourceManager(index).getRMContext().getRMAdminService() diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java index 963d01b4c90ff..fc7cc9f2c8545 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java @@ -41,6 +41,7 @@ import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -402,7 +403,7 @@ public void testUncaughtExceptionHandlerWithHAEnabled() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new Thread(new Runnable() { + final Thread testThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw rte; @@ -446,7 +447,7 @@ public void testUncaughtExceptionHandlerWithoutHA() // Create a thread and throw a RTE inside it final RuntimeException rte = new RuntimeException("TestRuntimeException"); - final Thread testThread = new Thread(new Runnable() { + final Thread testThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw rte; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java index dfdd8aa53fa29..03989ccd67b3d 100644 
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java @@ -26,6 +26,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.service.ServiceStateChangeListener; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.DecommissionType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -97,8 +98,8 @@ public void stateChanged(Service service) { resourceManager.registerServiceListener(rmStateChangeListener); resourceManager.init(configuration); - new Thread() { - public void run() { + new SubjectInheritingThread() { + public void work() { resourceManager.start(); } }.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java index 1a8b8f5040362..408ed7517a7a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java @@ -47,6 +47,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -782,9 +783,9 @@ 
public void testOutOfOrder() throws Exception { recordFactory.newRecordInstance(ContainerLaunchContext.class); // start container from another thread - Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { asyncClient.startContainerAsync(container, clc); } }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index f0da771332124..3e00c1647c25d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -23,6 +23,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; @@ -291,10 +292,10 @@ public void testSubmitApplicationInterrupted(SchedulerType type) throws IOExcept client.start(); // Submit the application and then interrupt it while its waiting // for submission to be successful. 
- final class SubmitThread extends Thread { + final class SubmitThread extends SubjectInheritingThread { private boolean isInterrupted = false; @Override - public void run() { + public void work() { ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class); ApplicationId applicationId = ApplicationId.newInstance( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java index a3436f7bbe0ba..7be22ad4d4ab6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java @@ -42,6 +42,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -218,7 +219,7 @@ protected void serviceInit(Configuration conf) throws Exception{ protected void serviceStart() throws Exception { //start all the components super.serviceStart(); - eventHandlingThread = new Thread(createThread()); + eventHandlingThread = new SubjectInheritingThread(createThread()); eventHandlingThread.setName(dispatcherThreadName); eventHandlingThread.start(); } @@ -284,7 +285,7 @@ protected void dispatch(Event event) { && (ShutdownHookManager.get().isShutdownInProgress()) == false && stopped == false) { stopped = true; - Thread shutDownThread = new Thread(createShutDownThread()); + Thread shutDownThread = new SubjectInheritingThread(createShutDownThread()); shutDownThread.setName("AsyncDispatcher ShutDown handler"); shutDownThread.start(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java index 647ab6e9481f5..a5f756ecc845e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/EventDispatcher.java @@ -28,6 +28,7 @@ import org.slf4j.MarkerFactory; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import java.util.concurrent.BlockingQueue; @@ -105,7 +106,7 @@ public void run() { public EventDispatcher(EventHandler handler, String name) { super(name); this.handler = handler; - this.eventProcessor = new Thread(new EventProcessor()); + this.eventProcessor = new SubjectInheritingThread(new EventProcessor()); this.eventProcessor.setName(getName() + ":Event Processor"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java index 0ae6c47d0ecd7..944baebfaaaa8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AbstractLivelinessMonitor.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.service.AbstractService; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * A simple liveliness monitor with which clients can register, trust the @@ -66,7 +67,7 @@ public AbstractLivelinessMonitor(String name) { protected void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; resetTimer(); - checkerThread = new Thread(new PingChecker()); + checkerThread = new SubjectInheritingThread(new PingChecker()); checkerThread.setName("Ping Checker for "+getName()); checkerThread.start(); super.serviceStart(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java index e0201cfcd1557..d2d2f59825559 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestYarnUncaughtExceptionHandler.java @@ -21,6 +21,7 @@ import org.junit.jupiter.api.Test; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import static org.junit.jupiter.api.Assertions.assertSame; @@ -44,7 +45,7 @@ void testUncaughtExceptionHandlerWithRuntimeException() final YarnUncaughtExceptionHandler spyYarnHandler = spy(exHandler); final YarnRuntimeException yarnException = new YarnRuntimeException( "test-yarn-runtime-exception"); - final Thread yarnThread = new Thread(new Runnable() { + final Thread yarnThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw yarnException; @@ -74,7 +75,7 @@ void testUncaughtExceptionHandlerWithError() ExitUtil.disableSystemExit(); final YarnUncaughtExceptionHandler spyErrorHandler = spy(exHandler); final java.lang.Error error = 
new java.lang.Error("test-error"); - final Thread errorThread = new Thread(new Runnable() { + final Thread errorThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw error; @@ -103,7 +104,7 @@ void testUncaughtExceptionHandlerWithOutOfMemoryError() ExitUtil.disableSystemHalt(); final YarnUncaughtExceptionHandler spyOomHandler = spy(exHandler); final OutOfMemoryError oomError = new OutOfMemoryError("out-of-memory-error"); - final Thread oomThread = new Thread(new Runnable() { + final Thread oomThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { throw oomError; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java index 054e751ff6436..9fffdf60df888 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java @@ -55,6 +55,7 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.TestContainerId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -174,8 +175,8 @@ private void writeSrcFileAndALog(Path srcFilePath, String fileName, final long l final CountDownLatch latch = new CountDownLatch(1); - Thread t = new Thread() { - public void run() { + SubjectInheritingThread t = new SubjectInheritingThread() { + public void work() { try { for (int i = 0; i < length / 3; i++) { osw.write(ch); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java index 09dfb92f1d0e6..556598eb1950f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java @@ -47,6 +47,7 @@ import org.apache.hadoop.util.Shell.ExitCodeException; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.MemInfo; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree.ProcessSmapMemoryInfo; @@ -75,8 +76,8 @@ public class TestProcfsBasedProcessTree { private static final int N = 6; // Controls the RogueTask - private class RogueTaskThread extends Thread { - public void run() { + private class RogueTaskThread extends SubjectInheritingThread { + public void work() { try { Vector args = new Vector(); if (isSetsidAvailable()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java index 78741720a171d..8089c0faceaed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.commons.collections4.map.LRUMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -283,7 +284,7 @@ public StartAndInsertTime(long startTime, long insertTime) { } } - private class EntityDeletionThread extends Thread { + private class EntityDeletionThread extends SubjectInheritingThread { private final long ttl; private final long ttlInterval; @@ -298,7 +299,7 @@ public EntityDeletionThread(Configuration conf) { } @Override - public void run() { + public void work() { while (true) { long timestamp = System.currentTimeMillis() - ttl; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java index 97ff86ede271b..1d52a74c4ac91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; 
import java.io.IOException; import java.util.ArrayList; @@ -389,7 +390,7 @@ protected void serviceStop() throws Exception { super.serviceStop(); } - private class EntityDeletionThread extends Thread { + private class EntityDeletionThread extends SubjectInheritingThread { private final long ttl; private final long ttlInterval; @@ -404,7 +405,7 @@ private class EntityDeletionThread extends Thread { } @Override - public void run() { + public void work() { Thread.currentThread().setName("Leveldb Timeline Store Retention"); while (true) { long timestamp = System.currentTimeMillis() - ttl; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java index cb59d41505deb..0d6a4d188be03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java @@ -34,13 +34,14 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; /** * Extends Thread and provides an implementation that is used for processing the * AM heart beat request asynchronously and sending back the response using the * callback method registered with the system. 
*/ -public class AMHeartbeatRequestHandler extends Thread { +public class AMHeartbeatRequestHandler extends SubjectInheritingThread { public static final Logger LOG = LoggerFactory.getLogger(AMHeartbeatRequestHandler.class); @@ -83,7 +84,7 @@ public void shutdown() { } @Override - public void run() { + public void work() { while (keepRunning) { AsyncAllocateRequestInfo requestInfo; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java index 0ff4260c5e358..ecad69c6fd333 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedAMPoolManager.java @@ -37,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; @@ -105,7 +106,7 @@ protected void serviceStart() throws Exception { protected void serviceStop() throws Exception { if (!this.unmanagedAppMasterMap.isEmpty()) { - finishApplicationThread = new Thread(createForceFinishApplicationThread()); + finishApplicationThread = new SubjectInheritingThread(createForceFinishApplicationThread()); finishApplicationThread.setName(dispatcherThreadName); finishApplicationThread.start(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java index bb6944e1034e0..80b9c7d9b695e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/uam/TestUnmanagedApplicationManager.java @@ -33,6 +33,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; @@ -228,7 +229,7 @@ public void testSlowRegisterCall() throws YarnException, IOException, InterruptedException { // Register with wait() in RM in a separate thread - Thread registerAMThread = new Thread(new Runnable() { + Thread registerAMThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -486,10 +487,10 @@ public TestableAMRequestHandlerThread(Configuration conf, } @Override - public void run() { + public void work() { try { getUGIWithToken(attemptId).doAs((PrivilegedExceptionAction) () -> { - TestableAMRequestHandlerThread.super.run(); + TestableAMRequestHandlerThread.super.work(); return null; }); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 76d3439575c55..ad34ccc329cfc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT; @@ -851,7 +852,7 @@ public String getProcessId(ContainerId containerID) { * This class will signal a target container after a specified delay. 
* @see #signalContainer */ - public static class DelayedProcessKiller extends Thread { + public static class DelayedProcessKiller extends SubjectInheritingThread { private final Container container; private final String user; private final String pid; @@ -883,7 +884,7 @@ public DelayedProcessKiller(Container container, String user, String pid, } @Override - public void run() { + public void work() { try { Thread.sleep(delay); containerExecutor.signalContainer(new ContainerSignalContext.Builder() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 6110e624f8d37..d109de3c79dab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -40,6 +40,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -534,9 +535,9 @@ public String getName() { } protected void shutDown(final int exitCode) { - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { NodeManager.this.stop(); } catch (Throwable t) { @@ -559,9 +560,9 @@ protected void resyncWithRM() { // Some other thread is already created for resyncing, do nothing } else { // We have got the lock, create a new 
thread - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { if (!rmWorkPreservingRestartEnabled) { LOG.info("Cleaning up running containers on resync"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java index 37fa33e14fcce..bb5200c07458c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.api.records.ResourceUtilization; @@ -149,7 +150,7 @@ protected void serviceStop() throws Exception { /** * Thread that monitors the resource utilization of this node. */ - private class MonitoringThread extends Thread { + private class MonitoringThread extends SubjectInheritingThread { /** * Initialize the node resource monitoring thread. */ @@ -162,7 +163,7 @@ public MonitoringThread() { * Periodically monitor the resource utilization of the node. 
*/ @Override - public void run() { + public void work() { while (true) { // Get node utilization and save it into the health status long pmem = resourceCalculatorPlugin.getPhysicalMemorySize() - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 5da709c49dc2b..e3b627ab80a61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -44,6 +44,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -329,7 +330,7 @@ protected void rebootNodeStatusUpdaterAndRegisterWithRM() { try { statusUpdater.join(); registerWithRM(); - statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater"); + statusUpdater = new SubjectInheritingThread(statusUpdaterRunnable, "Node Status Updater"); this.isStopped = false; statusUpdater.start(); LOG.info("NodeStatusUpdater thread is reRegistered and restarted"); @@ -828,7 +829,7 @@ private static Map parseCredentials( protected void startStatusUpdater() { statusUpdaterRunnable = new StatusUpdaterRunnable(); statusUpdater = - new Thread(statusUpdaterRunnable, "Node Status Updater"); + new 
SubjectInheritingThread(statusUpdaterRunnable, "Node Status Updater"); statusUpdater.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java index 78ba39ef69380..7658cc9ee0be2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java @@ -52,6 +52,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.CommandExecutor; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; @@ -497,10 +498,10 @@ public void validateResult() throws IOException { private Thread startStreamReader(final InputStream stream) throws IOException { - Thread streamReaderThread = new Thread() { + Thread streamReaderThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try (BufferedReader lines = new BufferedReader( new InputStreamReader(stream, StandardCharsets.UTF_8))) { char[] buf = new char[512]; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index cdd9fc916e339..9da951e0558f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -1749,9 +1750,9 @@ private void doRelaunch(final ContainerImpl container, container.sendRelaunchEvent(); } else { // wait for some time, then send launch event - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { Thread.sleep(retryInterval); container.sendRelaunchEvent(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java index e942983e01168..db486dfb4d0c9 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java @@ -23,6 +23,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -55,7 +56,7 @@ * events of all the containers together, and if we go over the limit picks * a container to kill. The algorithm that picks the container is a plugin. */ -public class CGroupElasticMemoryController extends Thread { +public class CGroupElasticMemoryController extends SubjectInheritingThread { protected static final Logger LOG = LoggerFactory .getLogger(CGroupElasticMemoryController.class); private final Clock clock = new MonotonicClock(); @@ -238,7 +239,7 @@ public static boolean isAvailable() { * reasons. */ @Override - public void run() { + public void work() { ExecutorService executor = null; try { // Disable OOM killer and set a limit. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index a7f0722e66f8e..fcce814fd9895 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -80,6 +80,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -861,7 +862,7 @@ private static ExecutorService createLocalizerExecutor(Configuration conf) { } - class PublicLocalizer extends Thread { + class PublicLocalizer extends SubjectInheritingThread { final FileContext lfs; final Configuration conf; @@ -975,7 +976,7 @@ private void createDir(Path dirPath, FsPermission perms) } @Override - public void run() { + public void work() { try { // TODO shutdown, better error handling esp. DU while (!Thread.currentThread().isInterrupted()) { @@ -1030,7 +1031,7 @@ public void run() { * access to user's credentials. One {@link LocalizerRunner} per localizerId. 
* */ - class LocalizerRunner extends Thread { + class LocalizerRunner extends SubjectInheritingThread { final LocalizerContext context; final String localizerId; @@ -1254,7 +1255,7 @@ private Path getPathForLocalization(LocalResource rsrc, @Override @SuppressWarnings("unchecked") // dispatcher not typed - public void run() { + public void work() { Path nmPrivateCTokensPath = null; Throwable exception = null; try { @@ -1405,7 +1406,7 @@ static String buildTokenFingerprint(Token tk) return fingerprint.toString(); } - static class CacheCleanup extends Thread { + static class CacheCleanup extends SubjectInheritingThread { private final Dispatcher dispatcher; @@ -1416,7 +1417,7 @@ public CacheCleanup(Dispatcher dispatcher) { @Override @SuppressWarnings("unchecked") // dispatcher not typed - public void run() { + public void work() { dispatcher.getEventHandler().handle( new LocalizationEvent(LocalizationEventType.CACHE_CLEANUP)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 0b4bd4a3fbd81..f4869c6f62a0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; 
import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; @@ -489,13 +490,13 @@ boolean isProcessTreeOverLimit(ResourceCalculatorProcessTree pTree, curMemUsageOfAgedProcesses, limit); } - private class MonitoringThread extends Thread { + private class MonitoringThread extends SubjectInheritingThread { MonitoringThread() { super("Container Monitor"); } @Override - public void run() { + public void work() { while (!stopped && !Thread.currentThread().isInterrupted()) { long start = Time.monotonicNow(); @@ -884,13 +885,13 @@ private String formatUsageString(long currentVmemUsage, long vmemLimit, } } - private class LogMonitorThread extends Thread { + private class LogMonitorThread extends SubjectInheritingThread { LogMonitorThread() { super("Container Log Monitor"); } @Override - public void run() { + public void work() { while (!stopped && !Thread.currentThread().isInterrupted()) { for (Entry entry : trackingContainers.entrySet()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java index 86e1379223bc5..bb114e50c9405 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java @@ -66,6 +66,7 @@ import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -462,8 +463,8 @@ public void testContainerKill() throws Exception { assumeTrue(shouldRun()); final ContainerId sleepId = getNextContainerId(); - Thread t = new Thread() { - public void run() { + SubjectInheritingThread t = new SubjectInheritingThread() { + public void work() { try { runAndBlock(sleepId, "sleep", "100"); } catch (IOException|ConfigurationException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index ad7a1e7776cdf..8bf27b1bdeeb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -48,6 +48,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest; @@ -744,9 +745,9 @@ protected void rebootNodeStatusUpdaterAndRegisterWithRM() { } } - 
class ContainerUpdateResourceThread extends Thread { + class ContainerUpdateResourceThread extends SubjectInheritingThread { @Override - public void run() { + public void work() { // Construct container resource increase request List increaseTokens = new ArrayList(); // Add increase request. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 89010bb3342e9..d663298d34c96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -69,6 +69,7 @@ import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1173,7 +1174,7 @@ protected NodeStatusUpdater createNodeStatusUpdater(Context context, assertTrue(lastService instanceof NodeStatusUpdater, "last service is NOT the node status updater"); - Thread starterThread = new Thread(() -> { + Thread starterThread = new SubjectInheritingThread(() -> { try { nm.start(); } catch (Throwable e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java index 5172e12b64e7b..984edf296bc8a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestableFederationInterceptor.java @@ -288,11 +288,11 @@ public TestableAMRequestHandlerThread(Configuration conf, } @Override - public void run() { + public void work() { try { getUGIWithToken(getAttemptId()) .doAs((PrivilegedExceptionAction) () -> { - TestableAMRequestHandlerThread.super.run(); + TestableAMRequestHandlerThread.super.work(); return null; }); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java index a30a13f0a132e..4b223f2f76a14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java @@ -74,6 +74,7 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ShellCommandExecutor; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; @@ -321,9 +322,9 @@ public void testMultipleLocalizers() throws Exception { FakeContainerLocalizer localizerB = testB.init(); // run localization - Thread threadA = new Thread() { + SubjectInheritingThread threadA = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { localizerA.runLocalization(nmAddr); } catch (Exception e) { @@ -331,9 +332,9 @@ public void run() { } } }; - Thread threadB = new Thread() { + SubjectInheritingThread threadB = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { localizerB.runLocalization(nmAddr); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java index 3b7d3011f91e3..0f69fe9bded51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.util; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; import 
org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor; @@ -114,9 +115,9 @@ public void testDeleteCgroup() throws Exception { fos.close(); final CountDownLatch latch = new CountDownLatch(1); - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { latch.countDown(); try { Thread.sleep(200); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index b2d5a9c9d394d..f9b410471006d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -57,6 +57,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1135,7 +1136,7 @@ private class SchedulerEventDispatcher extends SchedulerEventDispatcher(String name, int samplesPerMin) { super(scheduler, name); this.eventProcessorMonitor = - new Thread(new EventProcessorMonitor(getEventProcessorId(), + new SubjectInheritingThread(new EventProcessorMonitor(getEventProcessorId(), samplesPerMin)); this.eventProcessorMonitor .setName("ResourceManager Event Processor Monitor"); @@ -1224,7 +1225,7 @@ private synchronized void handleTransitionToStandByInNewThread() { 
return; } Thread standByTransitionThread = - new Thread(activeServices.standByTransitionRunnable); + new SubjectInheritingThread(activeServices.standByTransitionRunnable); standByTransitionThread.setName("StandByTransitionThread"); standByTransitionThread.start(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java index 9f4de2868a1fd..36436e892992e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java @@ -24,6 +24,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -105,14 +106,14 @@ protected void serviceStop() throws Exception { launcherPool.shutdown(); } - private class LauncherThread extends Thread { + private class LauncherThread extends SubjectInheritingThread { public LauncherThread() { super("ApplicationMaster Launcher"); } @Override - public void run() { + public void work() { while (!this.isInterrupted()) { Runnable toLaunch; try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java index 6384736d62e11..e19d5a24048a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java @@ -31,6 +31,7 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -597,7 +598,7 @@ public int incrementCurrentKeyId() { */ public void createCleanUpFinishApplicationThread(String stage) { String threadName = cleanUpThreadNamePrefix + "-" + stage; - Thread finishApplicationThread = new Thread(createCleanUpFinishApplicationThread()); + Thread finishApplicationThread = new SubjectInheritingThread(createCleanUpFinishApplicationThread()); finishApplicationThread.setName(threadName); finishApplicationThread.start(); LOG.info("CleanUpFinishApplicationThread has been started {}.", threadName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java index f1b80a946a7d0..9985e1997c8bd 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TimelineServiceV1Publisher.java @@ -29,6 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -536,13 +537,13 @@ public void handle(TimelineV1PublishEvent event) { } } - private class PutEventThread extends Thread { + private class PutEventThread extends SubjectInheritingThread { PutEventThread() { super("PutEventThread"); } @Override - public void run() { + public void work() { LOG.info("System metrics publisher will put events every " + String.valueOf(putEventInterval) + " milliseconds"); while (!stopped && !Thread.currentThread().isInterrupted()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index b7d1220bf9f2d..89f7c1bfb246f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ 
-27,6 +27,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.classification.VisibleForTesting; @@ -70,7 +71,7 @@ public void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new SubjectInheritingThread(r); t.setName(getName()); return t; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index f0990cf8fb0a6..41797ac57e02b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -30,6 +30,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.curator.ZKCuratorManager; import org.apache.hadoop.util.curator.ZKCuratorManager.SafeTransaction; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -1468,13 +1469,13 @@ public void safeDeleteAndCheckNode(String path, List fencingACL, * Helper class that periodically attempts creating a znode 
to ensure that * this RM continues to be the Active. */ - private class VerifyActiveStatusThread extends Thread { + private class VerifyActiveStatusThread extends SubjectInheritingThread { VerifyActiveStatusThread() { super(VerifyActiveStatusThread.class.getName()); } @Override - public void run() { + public void work() { try { while (!isFencedState()) { // Create and delete fencing node diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 6010bd21a186e..4fb423425b26f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -45,6 +45,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringInterner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -1238,9 +1239,9 @@ public RMAppAttemptState transition(RMAppAttemptImpl appAttempt, private void retryFetchingAMContainer(final RMAppAttemptImpl appAttempt) { // start a new thread so that we are not blocking main dispatcher thread. 
- new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { Thread.sleep(500); } catch (InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 3343c5f93118d..885665ea72d62 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -117,6 +117,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; @SuppressWarnings("unchecked") @@ -1716,9 +1717,9 @@ public void update() { * Thread which calls {@link #update()} every * updateInterval milliseconds. 
*/ - private class UpdateThread extends Thread { + private class UpdateThread extends SubjectInheritingThread { @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { synchronized (updateThreadMonitor) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index 001c638801bf5..facb0a08b762c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -21,6 +21,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; @@ -295,7 +296,7 @@ private void dynamicallyUpdateAppActivitiesMaxQueueLengthIfNeeded() { @Override protected void serviceStart() throws Exception { - cleanUpThread = new Thread(new Runnable() { + cleanUpThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { while (!stopped && !Thread.currentThread().isInterrupted()) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 02ffe83a6df7c..e36e032c2974a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -53,6 +53,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; @@ -638,7 +639,7 @@ public void setAsyncSchedulingConf(AsyncSchedulingConfiguration conf) { this.asyncSchedulingConf = conf; } - static class AsyncScheduleThread extends Thread { + static class AsyncScheduleThread extends SubjectInheritingThread { private final CapacityScheduler cs; private AtomicBoolean runSchedules = new AtomicBoolean(false); @@ -650,7 +651,7 @@ public AsyncScheduleThread(CapacityScheduler cs) { } @Override - public void run() { + public void work() { int debuggingLogCounter = 0; while (!Thread.currentThread().isInterrupted()) { try { @@ -691,7 +692,7 @@ public void suspendSchedule() { } - static class ResourceCommitterService extends Thread { + static class ResourceCommitterService extends SubjectInheritingThread { private final CapacityScheduler cs; 
private BlockingQueue> backlogs = new LinkedBlockingQueue<>(); @@ -702,7 +703,7 @@ public ResourceCommitterService(CapacityScheduler cs) { } @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { ResourceCommitRequest request = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 7fab417d893cb..1ad1bf029b291 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -38,6 +38,7 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.XMLUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.security.Permission; @@ -118,7 +119,7 @@ public void serviceInit(Configuration conf) throws Exception { this.allocFile = getAllocationFile(conf); if (this.allocFile != null) { this.fs = allocFile.getFileSystem(conf); - reloadThread = new Thread(() -> { + reloadThread = new SubjectInheritingThread(() -> { while (running) { try { synchronized (this) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java index 221bb17ae5ba3..48c0c981a7e19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java @@ -19,6 +19,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Resource; @@ -39,7 +40,7 @@ /** * Thread that handles FairScheduler preemption. */ -class FSPreemptionThread extends Thread { +class FSPreemptionThread extends SubjectInheritingThread { private static final Logger LOG = LoggerFactory. 
getLogger(FSPreemptionThread.class); protected final FSContext context; @@ -71,7 +72,7 @@ class FSPreemptionThread extends Thread { } @Override - public void run() { + public void work() { while (!Thread.interrupted()) { try { FSAppAttempt starvedApp = context.getStarvedApps().take(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index a3e3ddfafe39f..96e0b7944fc03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -100,6 +100,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import org.slf4j.Logger; @@ -315,10 +316,10 @@ public QueueManager getQueueManager() { * asynchronous to the node heartbeats. 
*/ @Deprecated - private class ContinuousSchedulingThread extends Thread { + private class ContinuousSchedulingThread extends SubjectInheritingThread { @Override - public void run() { + public void work() { while (!Thread.currentThread().isInterrupted()) { try { continuousSchedulingAttempt(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java index 38af12719efa0..4000e6de58b4c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/MultiNodeSorter.java @@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -99,7 +100,7 @@ public void serviceStart() throws Exception { assert !stopped : "starting when already stopped"; ses = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread newThread(Runnable r) { - Thread t = new Thread(r); + Thread t = new SubjectInheritingThread(r); t.setName(getName()); return t; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index 8e1214afc8095..9b4cf0a6015fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -62,6 +62,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AbstractEvent; @@ -200,7 +201,7 @@ protected void serviceStart() throws Exception { dtCancelThread.start(); if (tokenKeepAliveEnabled) { delayedRemovalThread = - new Thread(new DelayedTokenRemovalRunnable(getConfig()), + new SubjectInheritingThread(new DelayedTokenRemovalRunnable(getConfig()), "DelayedTokenCanceller"); delayedRemovalThread.start(); } @@ -347,7 +348,7 @@ public int hashCode() { } - private static class DelegationTokenCancelThread extends Thread { + private static class DelegationTokenCancelThread extends SubjectInheritingThread { private static class TokenWithConf { Token token; Configuration conf; @@ -377,7 +378,7 @@ public void cancelToken(Token token, } } - public void run() { + public void work() { TokenWithConf tokenWithConf = null; while (true) { try { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java index 4391c4177bb50..e701000b14744 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ACLsTestBase.java @@ -31,6 +31,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.Service.STATE; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -89,8 +90,8 @@ this.queueACLsManager, getRMContext() protected void doSecureLogin() throws IOException { } }; - new Thread() { - public void run() { + new SubjectInheritingThread() { + public void work() { resourceManager.start(); }; }.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index 7d7330ed9cf7f..bf124e51d9b83 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -55,6 +55,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.service.Service.STATE; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; @@ -150,8 +151,8 @@ protected ClientRMService createClientRMService() { this.queueACLsManager, null); }; }; - new Thread() { - public void run() { + new SubjectInheritingThread() { + public void work() { UserGroupInformation.createUserForTesting(ENEMY, new String[] {}); UserGroupInformation.createUserForTesting(FRIEND, new String[] { FRIENDLY_GROUP }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 677b8ab5349a8..b3db045279a57 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -71,6 +71,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Sets; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import 
org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; @@ -1532,9 +1533,9 @@ public void handle(Event rawEvent) { rmService.init(new Configuration()); // submit an app and wait for it to block while in app submission - Thread t = new Thread() { + SubjectInheritingThread t = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { rmService.submitApplication(submitRequest1); } catch (YarnException | IOException e) {} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java index a7e7253171fe1..cdd97efdbe7b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestLeaderElectorService.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -189,9 +190,9 @@ public void testExpireCurrentZKSession() throws Exception{ public void testRMFailToTransitionToActive() throws Exception{ conf.set(YarnConfiguration.RM_HA_ID, "rm1"); final AtomicBoolean throwException = new AtomicBoolean(true); - Thread 
launchRM = new Thread() { + SubjectInheritingThread launchRM = new SubjectInheritingThread() { @Override - public void run() { + public void work() { rm1 = new MockRM(conf, true) { @Override synchronized void transitionToActive() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java index d15a02c778a86..884da531b4b70 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java @@ -22,8 +22,12 @@ import java.io.File; import java.nio.file.Files; import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; + +import org.apache.hadoop.test.AbstractHadoopTestBase; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import static org.assertj.core.api.Assertions.assertThat; @@ -78,7 +82,7 @@ import org.glassfish.jersey.jettison.internal.entity.JettisonObjectProvider; -public class TestRMHA { +public class TestRMHA extends AbstractHadoopTestBase { private static final Logger LOG = LoggerFactory.getLogger(TestRMHA.class); private Configuration configuration; private MockRM rm = null; @@ -515,7 +519,7 @@ void stopActiveServices() { rm.adminService.transitionToActive(requestInfo); // 3. 
Try Transition to standby - Thread t = new Thread(new Runnable() { + Thread t = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -610,7 +614,7 @@ protected void serviceStart() throws Exception { } @Test - @Timeout(value = 9000) + @Timeout(value = 10, unit = TimeUnit.MINUTES) public void testTransitionedToActiveRefreshFail() throws Exception { configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); rm = new MockRM(configuration) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java index 4895987be4c5a..ef4bc6de7d813 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHAForAsyncScheduler.java @@ -236,14 +236,16 @@ private void checkAsyncSchedulerThreads(Thread currentThread){ Thread resourceCommitterService = null; for (Thread thread : threads) { StackTraceElement[] stackTrace = thread.getStackTrace(); - if(stackTrace.length>0){ - String stackBottom = stackTrace[stackTrace.length-1].toString(); - if(stackBottom.contains("AsyncScheduleThread.run")){ - numAsyncScheduleThread++; - asyncScheduleThread = thread; - }else if(stackBottom.contains("ResourceCommitterService.run")){ - numResourceCommitterService++; - resourceCommitterService = thread; + if (stackTrace.length > 0) { + for (StackTraceElement elem : stackTrace) { + String line = elem.toString(); + if (line.contains("AsyncScheduleThread.work")) { + 
numAsyncScheduleThread++; + asyncScheduleThread = thread; + } else if (line.contains("ResourceCommitterService.work")) { + numResourceCommitterService++; + resourceCommitterService = thread; + } } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java index 15319a7e51585..d83b862896f1f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestFSRMStateStore.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -414,7 +415,7 @@ public void testFSRMStateStoreClientRetry() throws Exception { final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false); cluster.shutdownNameNodes(); - Thread clientThread = new Thread(() -> { + Thread clientThread = new SubjectInheritingThread(() -> { try { store.storeApplicationStateInternal( ApplicationId.newInstance(100L, 1), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java index d41c8235b5c8c..7c776175c3877 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStoreZKClientConnections.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreTestBase.TestDispatcher; import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; import org.junit.jupiter.api.AfterEach; @@ -112,9 +113,9 @@ public void testZKClientRetry() throws Exception { final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false); testingServer.stop(); - Thread clientThread = new Thread() { + SubjectInheritingThread clientThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { store.getData(path); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index 7a0b49b878b11..e09706002fdb5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -26,6 +26,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -101,8 +102,8 @@ public void setUp() { resourceTrackerService.start(); } - private class ThirdNodeHeartBeatThread extends Thread { - public void run() { + private class ThirdNodeHeartBeatThread extends SubjectInheritingThread { + public void work() { int lastResponseID = 0; while (!stopT) { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java index 64ac256275527..f0e74a68a38d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java @@ -24,6 +24,7 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.impl.MetricsSystemImpl; import org.apache.hadoop.test.MetricsAsserts; +import 
org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -817,7 +818,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException { * simulate the concurrent calls for QueueMetrics#getQueueMetrics */ // thread A will keep querying the same queue metrics for a specified number of iterations - Thread threadA = new Thread(() -> { + Thread threadA = new SubjectInheritingThread(() -> { try { for (int i = 0; i < numIterations; i++) { QueueMetrics qm = QueueMetrics.getQueueMetrics().get(queueName); @@ -833,7 +834,7 @@ public void testQueueMetricsRaceCondition() throws InterruptedException { } }); // thread B will keep adding new queue metrics for a specified number of iterations - Thread threadB = new Thread(() -> { + Thread threadB = new SubjectInheritingThread(() -> { try { for (int i = 0; i < numIterations; i++) { QueueMetrics.getQueueMetrics().put("q" + i, metrics); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index c77bb26de82cc..ed76a5cad9ede 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -111,6 +111,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import 
org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.LocalConfigurationProvider; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; @@ -1064,7 +1065,7 @@ public ApplicationMasterProtocol run() { // grab the scheduler lock from another thread // and verify an allocate call in this thread doesn't block on it final CyclicBarrier barrier = new CyclicBarrier(2); - Thread otherThread = new Thread(new Runnable() { + Thread otherThread = new SubjectInheritingThread(new Runnable() { @Override public void run() { synchronized(cs) { @@ -3088,7 +3089,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { // The scheduler thread holds the queue's read-lock for 5 seconds // then the preemption's read-lock is used - Thread schedulerThread = new Thread(() -> { + Thread schedulerThread = new SubjectInheritingThread(() -> { queue.readLock.lock(); try { Thread.sleep(5 * 1000); @@ -3101,7 +3102,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { }, "SCHEDULE"); // The complete thread locks/unlocks the queue's write-lock after 1 seconds - Thread completeThread = new Thread(() -> { + Thread completeThread = new SubjectInheritingThread(() -> { try { Thread.sleep(1000); } catch (InterruptedException e) { @@ -3115,7 +3116,7 @@ public void testRefreshQueueWithOpenPreemption() throws Exception { // The refresh thread holds the preemption's write-lock after 2 seconds // while it calls the getChildQueues(ByTryLock) that // locks(tryLocks) the queue's read-lock - Thread refreshThread = new Thread(() -> { + Thread refreshThread = new SubjectInheritingThread(() -> { try { Thread.sleep(2 * 1000); } catch (InterruptedException e) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java index c2f6c823d168c..62e2a5cbb1157 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java @@ -31,6 +31,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.yarn.api.records.Container; @@ -800,7 +801,7 @@ public RMNodeLabelsManager createNodeLabelManager() { rm.close(); } - public static class NMHeartbeatThread extends Thread { + public static class NMHeartbeatThread extends SubjectInheritingThread { private List mockNMS; private int interval; private volatile boolean shouldStop = false; @@ -810,7 +811,7 @@ public NMHeartbeatThread(List mockNMs, int interval) { this.interval = interval; } - public void run() { + public void work() { while (true) { if (shouldStop) { break; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java index 1a682e7e06fd3..fa5f1b288a87d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodes.java @@ -39,6 +39,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; @@ -422,8 +423,8 @@ public void testAllocateOfReservedContainerFromAnotherNode() .build()); final AtomicBoolean result = new AtomicBoolean(false); - Thread t = new Thread() { - public void run() { + Thread t = new SubjectInheritingThread() { + public void work() { try { MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1); result.set(true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java index 3edb87ebb903e..3c105a93244ce 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java @@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -171,8 +172,8 @@ public void testAllocateReservationFromOtherNode() throws Exception { // Launch AM in a thread and in parallel free the preempted node's // unallocated resources in main thread - Thread t1 = new Thread() { - public void run() { + Thread t1 = new SubjectInheritingThread() { + public void work() { try { MockAM am2 = MockRM.launchAM(app2, rm, nm1); result.set(true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index d977a7adab452..01326438e7414 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -62,6 
+62,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -1129,7 +1130,7 @@ public void testUserLimitCache() throws Exception { // Set up allocation threads Thread[] threads = new Thread[numAllocationThreads]; for (int i = 0; i < numAllocationThreads; i++) { - threads[i] = new Thread(new Runnable() { + threads[i] = new SubjectInheritingThread(new Runnable() { @Override public void run() { try { @@ -4386,7 +4387,7 @@ public void testConcurrentAccess() throws Exception { final List conException = new ArrayList(); - Thread submitAndRemove = new Thread(new Runnable() { + Thread submitAndRemove = new SubjectInheritingThread(new Runnable() { @Override public void run() { @@ -4405,7 +4406,7 @@ public void run() { } }, "SubmitAndRemoveApplicationAttempt Thread"); - Thread getAppsInQueue = new Thread(new Runnable() { + Thread getAppsInQueue = new SubjectInheritingThread(new Runnable() { List apps = new ArrayList(); @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java index 706cdc9034cea..5958f79971392 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java @@ -37,7 +37,7 @@ private MockPreemptionThread(FairScheduler scheduler) { } @Override - public void run() { + public void work() { while (!Thread.interrupted()) { try { FSAppAttempt app = context.getStarvedApps().take(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java index f5254298dc42d..009f84b0a49b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -338,9 +339,9 @@ public void TestNodeAvailableResourceComparatorTransitivity() { } // To simulate unallocated resource changes - new Thread() { + new SubjectInheritingThread() { @Override - public void run() { + public void work() { for (int j = 0; j < 100; j++) { for (FSSchedulerNode node : clusterNodeTracker.getAllNodes()) { int i 
= ThreadLocalRandom.current().nextInt(-30, 30); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java index 33fdba582ea1e..653fcaee608e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java @@ -31,6 +31,7 @@ import java.util.TreeSet; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; @@ -484,9 +485,9 @@ public void testModWhileSorting(){ * Thread to simulate concurrent schedulable changes while sorting */ private Thread modificationThread(final List schedulableList) { - Thread modThread = new Thread() { + SubjectInheritingThread modThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { try { // This sleep is needed to make sure the sort has started before the // modifications start and finish diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 78c372c1b22b7..c64547cf65264 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -74,6 +74,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -994,9 +995,9 @@ public Long answer(InvocationOnMock invocation) localDtr.init(conf); localDtr.start(); // submit a job that blocks during renewal - Thread submitThread = new Thread() { + SubjectInheritingThread submitThread = new SubjectInheritingThread() { @Override - public void run() { + public void work() { localDtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user", new Configuration()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java index 6e574d79086bd..a027c87fb24c3 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; @@ -1735,7 +1736,7 @@ public void testSchedulerBulkActivities() throws Exception { } } - private class RESTClient extends Thread { + private class RESTClient extends SubjectInheritingThread { private int expectedCount; private boolean done = false; @@ -1754,7 +1755,7 @@ JSONObject getOutput() { } @Override - public void run() { + public void work() { WebTarget r = targetWithJsonObject(); Response response = r.path("ws").path("v1").path("cluster") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java index 8f3c4d0fe577e..fbd3b4efd0757 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/Router.java @@ -47,6 +47,7 @@ import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringUtils; import 
org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -202,7 +203,7 @@ protected void serviceStop() throws Exception { } protected void shutDown() { - new Thread(Router.this::stop).start(); + new SubjectInheritingThread(Router.this::stop).start(); } protected RouterClientRMService createClientRMProxyService() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java index 0c02fa1e8caae..9017188871ff1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java @@ -29,6 +29,7 @@ import java.util.Map; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; @@ -226,9 +227,9 @@ public void testClientPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * ClientRequestInterceptor for the user. 
*/ - class ClientTestThread extends Thread { + class ClientTestThread extends SubjectInheritingThread { private ClientRequestInterceptor interceptor; - @Override public void run() { + @Override public void work() { try { interceptor = pipeline(); } catch (IOException | InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java index d4b02f4d951f3..1430e98ed3bdd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java @@ -29,6 +29,7 @@ import java.util.Map; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; @@ -235,9 +236,9 @@ public void testRMAdminPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * RMAdminRequestInterceptor for the user. 
*/ - class ClientTestThread extends Thread { + class ClientTestThread extends SubjectInheritingThread { private RMAdminRequestInterceptor interceptor; - @Override public void run() { + @Override public void work() { try { interceptor = pipeline(); } catch (IOException | InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java index ceb75e38a24a7..7ad5220d67514 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java @@ -31,6 +31,7 @@ import javax.ws.rs.core.Response; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo; @@ -297,9 +298,9 @@ public void testWebPipelineConcurrent() throws InterruptedException { * ClientTestThread is a thread to simulate a client request to get a * RESTRequestInterceptor for the user. 
*/ - class ClientTestThread extends Thread { + class ClientTestThread extends SubjectInheritingThread { private RESTRequestInterceptor interceptor; - @Override public void run() { + @Override public void work() { try { interceptor = pipeline(); } catch (IOException | InterruptedException e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java index d8a7a56ac198b..4149439ecbb99 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/cosmosdb/CosmosDBDocumentStoreReader.java @@ -24,6 +24,7 @@ import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Sets; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils; import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.document.NoDocumentFoundException; @@ -244,7 +245,7 @@ public synchronized void close() { } private void addShutdownHook() { - Runtime.getRuntime().addShutdownHook(new Thread(() -> { + Runtime.getRuntime().addShutdownHook(new SubjectInheritingThread(() -> { if 
(executorService != null) { executorService.shutdown(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java index 7cfb7f2fe415c..0562ce64aa29b 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/cosmosdb/CosmosDBDocumentStoreWriter.java @@ -35,6 +35,7 @@ import com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.concurrent.SubjectInheritingThread; import org.apache.hadoop.yarn.server.timelineservice.metrics.PerNodeAggTimelineCollectorMetrics; import org.apache.hadoop.yarn.server.timelineservice.documentstore.DocumentStoreUtils; import org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.CollectionType; @@ -279,7 +280,7 @@ public synchronized void close() { } private void addShutdownHook() { - Runtime.getRuntime().addShutdownHook(new Thread(() -> { + Runtime.getRuntime().addShutdownHook(new SubjectInheritingThread(() -> { if (executorService != null) { executorService.shutdown(); }