
setStatus and counter increment compatibility

1 parent c75c74d commit ffc0cdfd6ed48d12fd5c677d61e5ff2f0d5889ec @plelevier committed Aug 26, 2013
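
The change swaps direct calls such as context.setStatus(...) and counter.increment(...) for the HadoopCompat helpers shipped with elephant-bird. Several mapreduce types (for example TaskAttemptContext and Counter) switched between classes and interfaces across Hadoop 1 and Hadoop 2, so bytecode compiled against one line can fail with IncompatibleClassChangeError on the other; HadoopCompat routes these calls through reflection so the same Camus jar can run on both. The snippet below is a minimal sketch of that reflective dispatch, assuming elephant-bird's general approach rather than reproducing its code (CounterCompatSketch is a hypothetical name):

    import java.lang.reflect.Method;
    import org.apache.hadoop.mapreduce.Counter;

    // Minimal sketch of reflection-based dispatch in the spirit of
    // com.twitter.elephantbird.util.HadoopCompat; not the library's actual code.
    final class CounterCompatSketch {

        // Resolved once at class-load time against whichever Hadoop jar is on the
        // classpath, so no invokeinterface/invokevirtual opcode gets baked into
        // the caller's bytecode.
        private static final Method INCREMENT = findIncrement();

        private static Method findIncrement() {
            try {
                return Counter.class.getMethod("increment", long.class);
            } catch (NoSuchMethodException e) {
                throw new IllegalStateException("Unsupported Hadoop version", e);
            }
        }

        static void incrementCounter(Counter counter, long amount) {
            try {
                INCREMENT.invoke(counter, amount);
            } catch (Exception e) {
                throw new RuntimeException("Could not increment counter " + counter, e);
            }
        }
    }

At the call sites this becomes HadoopCompat.incrementCounter(context.getCounter("total", "mapper-time(ms)"), mapTime), exactly as in the hunks below; the build presumably also needs the elephant-bird artifact that provides HadoopCompat on the classpath.
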
@@ -6,17 +6,18 @@
import org.apache.hadoop.mapreduce.Mapper;
import com.linkedin.camus.etl.kafka.common.EtlKey;
+import com.twitter.elephantbird.util.HadoopCompat;
/**
* KafkaETL mapper
- *
+ *
* input -- EtlKey, AvroWrapper
- *
+ *
* output -- EtlKey, AvroWrapper
- *
+ *
*/
public class EtlMapper extends Mapper<EtlKey, AvroWrapper<Object>, EtlKey, AvroWrapper<Object>> {
-
+
@Override
public void map(EtlKey key, AvroWrapper<Object> val, Context context) throws IOException, InterruptedException {
long startTime = System.currentTimeMillis();
@@ -25,6 +26,6 @@ public void map(EtlKey key, AvroWrapper<Object> val, Context context) throws IOException, InterruptedException {
long endTime = System.currentTimeMillis();
long mapTime = ((endTime - startTime));
- context.getCounter("total", "mapper-time(ms)").increment(mapTime);
+ HadoopCompat.incrementCounter(context.getCounter("total", "mapper-time(ms)"), mapTime);
}
}
@@ -219,7 +219,7 @@ public boolean nextKeyValue() throws IOException, InterruptedException {
statusMsg += statusMsg.length() > 0 ? "; " : "";
statusMsg += request.getTopic() + ":" + request.getNodeId() + ":"
+ request.getPartition();
- context.setStatus(statusMsg);
+ HadoopCompat.setStatus(context, statusMsg);
if (reader != null) {
closeReader();
@@ -233,8 +233,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException {
while (reader.getNext(key, msgValue)) {
context.progress();
- mapperContext.getCounter("total", "data-read").increment(msgValue.getLength());
- mapperContext.getCounter("total", "event-count").increment(1);
+ HadoopCompat.incrementCounter(mapperContext.getCounter("total", "data-read"), msgValue.getLength());
+ HadoopCompat.incrementCounter(mapperContext.getCounter("total", "event-count"), 1);
byte[] bytes = getBytes(msgValue);
// check the checksum of message
@@ -280,32 +280,30 @@ public boolean nextKeyValue() throws IOException, InterruptedException {
}
if (timeStamp < beginTimeStamp) {
- mapperContext.getCounter("total", "skip-old").increment(1);
+ HadoopCompat.incrementCounter(mapperContext.getCounter("total", "skip-old"), 1);
} else if (endTimeStamp == 0) {
DateTime time = new DateTime(timeStamp);
statusMsg += " begin read at " + time.toString();
- context.setStatus(statusMsg);
+ HadoopCompat.setStatus(context, statusMsg);
System.out.println(key.getTopic() + " begin read at " + time.toString());
endTimeStamp = (time.plusHours(this.maxPullHours)).getMillis();
} else if (timeStamp > endTimeStamp || System.currentTimeMillis() > maxPullTime) {
statusMsg += " max read at " + new DateTime(timeStamp).toString();
- context.setStatus(statusMsg);
+ HadoopCompat.setStatus(context, statusMsg);
System.out.println(key.getTopic() + " max read at "
+ new DateTime(timeStamp).toString());
- mapperContext.getCounter("total", "request-time(ms)").increment(
- reader.getFetchTime());
+ HadoopCompat.incrementCounter(mapperContext.getCounter("total", "request-time(ms)"), reader.getFetchTime());
closeReader();
}
long secondTime = System.currentTimeMillis();
value.datum(wrapper.getRecord());
long decodeTime = ((secondTime - tempTime));
- mapperContext.getCounter("total", "decode-time(ms)").increment(decodeTime);
+ HadoopCompat.incrementCounter(mapperContext.getCounter("total", "decode-time(ms)"), decodeTime);
if (reader != null) {
- mapperContext.getCounter("total", "request-time(ms)").increment(
- reader.getFetchTime());
+ HadoopCompat.incrementCounter(mapperContext.getCounter("total", "request-time(ms)"), reader.getFetchTime());
}
return true;
}
