HDFS-9402. Switch DataNode.LOG to use slf4j. Contributed by Walter Su.
Haohui Mai committed Nov 22, 2015
Parent: 4c061e6, commit: 176ff5c
Showing 12 changed files with 33 additions and 24 deletions.
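For readers skimming the diff below: the change is a mechanical migration of DataNode.LOG, and of the datanode classes that alias it, from Apache commons-logging to slf4j. A minimal sketch of the pattern follows (hypothetical class name, not the exact Hadoop source); note that DataNode.ClientTraceLog is left as a commons-logging Log by this commit, as the unchanged ClientTraceLog declarations further down show.

// Before (commons-logging):
//   import org.apache.commons.logging.Log;
//   import org.apache.commons.logging.LogFactory;
//   public static final Log LOG = LogFactory.getLog(DataNode.class);

// After (slf4j); the class name here is illustrative only.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggerMigrationSketch {
  public static final Logger LOG = LoggerFactory.getLogger(LoggerMigrationSketch.class);

  // Helper classes keep sharing the DataNode logger; only the field type changes,
  // e.g. BPOfferService: static final Logger LOG = DataNode.LOG;

  public static void main(String[] args) {
    // slf4j supports parameterized messages, although this commit leaves the
    // existing string concatenation at the call sites untouched.
    LOG.info("Logger implementation in use: {}", LOG.getClass().getName());
  }
}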
2 changes: 2 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1666,6 +1666,8 @@ Release 2.8.0 - UNRELEASED
HDFS-9439. Include status of closeAck into exception message in DataNode#run.
(Xiao Chen via Yongjun Zhang)

HDFS-9402. Switch DataNode.LOG to use slf4j. (Walter Su via wheat9)

OPTIMIZATIONS

HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -22,7 +22,6 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -35,6 +34,8 @@
import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;

import org.slf4j.Logger;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
@@ -54,7 +55,7 @@
*/
@InterfaceAudience.Private
class BPOfferService {
static final Log LOG = DataNode.LOG;
static final Logger LOG = DataNode.LOG;

/**
* Information about the namespace that this service
@@ -32,7 +32,6 @@
import java.util.concurrent.atomic.AtomicBoolean;

import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
@@ -65,6 +64,7 @@

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import org.slf4j.Logger;

/**
* A thread per active or standby namenode to perform:
@@ -78,7 +78,7 @@
@InterfaceAudience.Private
class BPServiceActor implements Runnable {

static final Log LOG = DataNode.LOG;
static final Logger LOG = DataNode.LOG;
final InetSocketAddress nnAddr;
HAServiceState state;

@@ -600,7 +600,7 @@ void join() {
private synchronized void cleanUp() {

shouldServiceRun = false;
IOUtils.cleanup(LOG, bpNamenode);
IOUtils.cleanup(null, bpNamenode);
bpos.shutdownActor(this);
}
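The cleanup changes here and in the later files are a side effect of the logger switch rather than a behavioural goal: org.apache.hadoop.io.IOUtils.cleanup takes an org.apache.commons.logging.Log as its first argument, so the slf4j LOG can no longer be passed to it. A minimal sketch of the two replacements the commit uses, assuming Hadoop's IOUtils on the classpath:

import java.io.Closeable;

import org.apache.hadoop.io.IOUtils;

class CleanupSketch {
  // Illustrative only. IOUtils.cleanup(Log, Closeable...) logs close failures
  // through the supplied commons-logging Log; passing null keeps the closing
  // behaviour but silently drops that logging.
  static void closeQuietly(Closeable resource) {
    IOUtils.cleanup(null, resource);
  }

  // IOUtils.closeStream(c) is the single-stream shorthand for cleanup(null, c),
  // which is what BlockReceiver#receiveBlock switches to below.
  static void closeOne(Closeable resource) {
    IOUtils.closeStream(resource);
  }
}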

@@ -834,7 +834,7 @@ public void run() {
sleepAndLogInterrupts(5000, "initializing");
} else {
runningState = RunningState.FAILED;
LOG.fatal("Initialization failed for " + this + ". Exiting. ", ioe);
LOG.error("Initialization failed for " + this + ". Exiting. ", ioe);
return;
}
}
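A note on the level change above: org.slf4j.Logger has no fatal() method (its levels stop at error), so this call, like the one in DataNode#secureMain later in the diff, is rewritten as LOG.error rather than removed.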
@@ -23,7 +23,6 @@
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;

import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -35,6 +34,7 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.slf4j.Logger;

/**
* Manages the BPOfferService objects for the data node.
@@ -43,7 +43,7 @@
*/
@InterfaceAudience.Private
class BlockPoolManager {
private static final Log LOG = DataNode.LOG;
private static final Logger LOG = DataNode.LOG;

private final Map<String, BPOfferService> bpByNameserviceId =
Maps.newHashMap();
@@ -63,13 +63,14 @@
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.SYNC_FILE_RANGE_WRITE;

import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;

/** A class that receives a block and writes to its own disk, meanwhile
* may copies it to another site. If a throttler is provided,
* streaming throttling is also supported.
**/
class BlockReceiver implements Closeable {
public static final Log LOG = DataNode.LOG;
public static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;

@VisibleForTesting
@@ -960,7 +961,7 @@ void receiveBlock(
// The worst case is not recovering this RBW replica.
// Client will fall back to regular pipeline recovery.
} finally {
IOUtils.cleanup(LOG, out);
IOUtils.closeStream(out);
}
try {
// Even if the connection is closed after the ack packet is
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.datanode;

import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -32,6 +31,7 @@
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.Daemon;
import org.slf4j.Logger;

import java.io.IOException;
import java.util.ArrayList;
@@ -44,7 +44,7 @@
*/
@InterfaceAudience.Private
public class BlockRecoveryWorker {
public static final Log LOG = DataNode.LOG;
public static final Logger LOG = DataNode.LOG;

private final DataNode datanode;
private final Configuration conf;
@@ -55,6 +55,7 @@

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;

/**
* Reads a block from the disk and sends it to a recipient.
@@ -97,7 +98,7 @@
* no checksum error, it replies to DataNode with OP_STATUS_CHECKSUM_OK.
*/
class BlockSender implements java.io.Closeable {
static final Log LOG = DataNode.LOG;
static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
private static final boolean is32Bit =
System.getProperty("sun.arch.data.model").equals("32");
@@ -212,6 +212,8 @@
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Lists;
import com.google.protobuf.BlockingService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**********************************************************
* DataNode is a class (and program) that stores a set of
@@ -248,7 +250,7 @@
public class DataNode extends ReconfigurableBase
implements InterDatanodeProtocol, ClientDatanodeProtocol,
TraceAdminProtocol, DataNodeMXBean {
public static final Log LOG = LogFactory.getLog(DataNode.class);
public static final Logger LOG = LoggerFactory.getLogger(DataNode.class);

static{
HdfsConfiguration.init();
@@ -2605,7 +2607,7 @@ public static void secureMain(String args[], SecureResources resources) {
errorCode = 1;
}
} catch (Throwable e) {
LOG.fatal("Exception in secureMain", e);
LOG.error("Exception in secureMain", e);
terminate(1, e);
} finally {
// We need to terminate the process here because either shutdown was called
@@ -89,13 +89,14 @@
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;


/**
* Thread for processing incoming/outgoing data stream.
*/
class DataXceiver extends Receiver implements Runnable {
public static final Log LOG = DataNode.LOG;
public static final Logger LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;

private Peer peer;
@@ -376,7 +377,7 @@ public void requestShortCircuitFds(final ExtendedBlock blk,
blk.getBlockId(), dnR.getDatanodeUuid(), success));
}
if (fis != null) {
IOUtils.cleanup(LOG, fis);
IOUtils.cleanup(null, fis);
}
}
}
@@ -22,7 +22,6 @@
import java.nio.channels.AsynchronousCloseException;
import java.util.HashMap;

import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.net.Peer;
@@ -32,6 +31,7 @@
import org.apache.hadoop.util.Daemon;

import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;

/**
* Server used for receiving/sending a block of data.
@@ -40,7 +40,7 @@
* Hadoop IPC mechanism.
*/
class DataXceiverServer implements Runnable {
public static final Log LOG = DataNode.LOG;
public static final Logger LOG = DataNode.LOG;

private final PeerServer peerServer;
private final DataNode datanode;
@@ -262,7 +262,7 @@ synchronized void restartNotifyPeers() {
synchronized void closeAllPeers() {
LOG.info("Closing all peers.");
for (Peer p : peers.keySet()) {
IOUtils.cleanup(LOG, p);
IOUtils.cleanup(null, p);
}
peers.clear();
peersXceiver.clear();
@@ -44,7 +44,6 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
@@ -78,6 +77,7 @@
import org.apache.hadoop.util.DataChecksum;

import com.google.common.base.Preconditions;
import org.slf4j.Logger;

/**
* ErasureCodingWorker handles the erasure coding recovery work commands. These
@@ -87,7 +87,7 @@
*/
@InterfaceAudience.Private
public final class ErasureCodingWorker {
private static final Log LOG = DataNode.LOG;
private static final Logger LOG = DataNode.LOG;

private final DataNode datanode;
private final Configuration conf;
@@ -837,7 +837,7 @@ private Peer newConnectedPeer(ExtendedBlock b, InetSocketAddress addr,
return peer;
} finally {
if (!success) {
IOUtils.cleanup(LOG, peer);
IOUtils.cleanup(null, peer);
IOUtils.closeSocket(sock);
}
}
@@ -38,14 +38,15 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.log4j.Level;

public class TestDistCh extends junit.framework.TestCase {
{
((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
).getLogger().setLevel(Level.ERROR);
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ERROR);
GenericTestUtils.setLogLevel(DataNode.LOG, Level.ERROR);
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ERROR);
}
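The TestDistCh change follows from the same switch: DataNode.LOG is no longer a commons-logging Log backed by a Log4JLogger wrapper, so the old cast no longer compiles; GenericTestUtils.setLogLevel is used instead, which (as used here) accepts the slf4j Logger and adjusts the corresponding log4j level.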
