Skip to content

Commit

Permalink
HIVE-2481: HadoopJobExecHelper does not handle null counters well (Ramkumar Vadali via He Yongqiang)
Browse files Browse the repository at this point in the history

git-svn-id: https://svn.apache.org/repos/asf/hive/trunk@1178981 13f79535-47bb-0310-9956-ffa450edef68
  • Loading branch information
Yongqiang He committed Oct 4, 2011
1 parent 9f8c34d commit 66bdfe1
Showing 1 changed file with 56 additions and 50 deletions.
106 changes: 56 additions & 50 deletions ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
Original file line number Diff line number Diff line change
Expand Up @@ -322,14 +322,16 @@ private MapRedStats progress(ExecDriverTaskHandle th) throws IOException {
// find out CPU msecs
// In the case that we can't find out this number, we just skip the step to print
// it out.
Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"CPU_MILLISECONDS");
if (counterCpuMsec != null) {
long newCpuMSec = counterCpuMsec.getValue();
if (newCpuMSec > 0) {
cpuMsec = newCpuMSec;
report += ", Cumulative CPU "
+ (cpuMsec / 1000D) + " sec";
if (ctrs != null) {
Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"CPU_MILLISECONDS");
if (counterCpuMsec != null) {
long newCpuMSec = counterCpuMsec.getValue();
if (newCpuMSec > 0) {
cpuMsec = newCpuMSec;
report += ", Cumulative CPU "
+ (cpuMsec / 1000D) + " sec";
}
}
}

Expand Down Expand Up @@ -372,66 +374,70 @@ private MapRedStats progress(ExecDriverTaskHandle th) throws IOException {
}

//Prepare data for Client Stat Publishers (if any present) and execute them
if (clientStatPublishers.size() > 0){
if (clientStatPublishers.size() > 0 && ctrs != null){
Map<String, Double> exctractedCounters = extractAllCounterValues(ctrs);
for(ClientStatsPublisher clientStatPublisher : clientStatPublishers){
clientStatPublisher.run(exctractedCounters, rj.getID().toString());
}
}

Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"CPU_MILLISECONDS");
if (counterCpuMsec != null) {
long newCpuMSec = counterCpuMsec.getValue();
if (newCpuMSec > cpuMsec) {
cpuMsec = newCpuMSec;
if (ctrs != null) {
Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"CPU_MILLISECONDS");
if (counterCpuMsec != null) {
long newCpuMSec = counterCpuMsec.getValue();
if (newCpuMSec > cpuMsec) {
cpuMsec = newCpuMSec;
}
}
}

MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());

Counter ctr;
if (ctrs != null) {
Counter ctr;

ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"REDUCE_SHUFFLE_BYTES");
if (ctr != null) {
mapRedStats.setReduceShuffleBytes(ctr.getValue());
}
ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"REDUCE_SHUFFLE_BYTES");
if (ctr != null) {
mapRedStats.setReduceShuffleBytes(ctr.getValue());
}

ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"MAP_INPUT_RECORDS");
if (ctr != null) {
mapRedStats.setMapInputRecords(ctr.getValue());
}
ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"MAP_INPUT_RECORDS");
if (ctr != null) {
mapRedStats.setMapInputRecords(ctr.getValue());
}

ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"MAP_OUTPUT_RECORDS");
if (ctr != null) {
mapRedStats.setMapOutputRecords(ctr.getValue());
}
ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"MAP_OUTPUT_RECORDS");
if (ctr != null) {
mapRedStats.setMapOutputRecords(ctr.getValue());
}

ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"REDUCE_INPUT_RECORDS");
if (ctr != null) {
mapRedStats.setReduceInputRecords(ctr.getValue());
}
ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"REDUCE_INPUT_RECORDS");
if (ctr != null) {
mapRedStats.setReduceInputRecords(ctr.getValue());
}

ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"REDUCE_OUTPUT_RECORDS");
if (ctr != null) {
mapRedStats.setReduceOutputRecords(ctr.getValue());
}
ctr = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
"REDUCE_OUTPUT_RECORDS");
if (ctr != null) {
mapRedStats.setReduceOutputRecords(ctr.getValue());
}

ctr = ctrs.findCounter("FileSystemCounters",
"HDFS_BYTES_READ");
if (ctr != null) {
mapRedStats.setHdfsRead(ctr.getValue());
}
ctr = ctrs.findCounter("FileSystemCounters",
"HDFS_BYTES_READ");
if (ctr != null) {
mapRedStats.setHdfsRead(ctr.getValue());
}

ctr = ctrs.findCounter("FileSystemCounters",
"HDFS_BYTES_WRITTEN");
if (ctr != null) {
mapRedStats.setHdfsWrite(ctr.getValue());
ctr = ctrs.findCounter("FileSystemCounters",
"HDFS_BYTES_WRITTEN");
if (ctr != null) {
mapRedStats.setHdfsWrite(ctr.getValue());
}
}

this.task.setDone();
Expand Down

0 comments on commit 66bdfe1

Please sign in to comment.