Preview of MAPREDUCE-279 merged to trunk.

commit 97f84ae297cd3d8a76fa7dc8a73818b79c851716 1 parent 641901a
@acmurthy authored
Showing with 27,786 additions and 0 deletions.
  1. 0  {mapreduce → hadoop-mapreduce}/.eclipse.templates/.launches/AllMapredTests.launch
  2. 0  {mapreduce → hadoop-mapreduce}/.eclipse.templates/.launches/JobTracker.launch
  3. 0  {mapreduce → hadoop-mapreduce}/.eclipse.templates/.launches/SpecificTestTemplate.launch
  4. 0  {mapreduce → hadoop-mapreduce}/.eclipse.templates/.launches/TaskTracker.launch
  5. 0  {mapreduce → hadoop-mapreduce}/.eclipse.templates/README.txt
  6. 0  {mapreduce → hadoop-mapreduce}/.gitignore
  7. 0  {mapreduce → hadoop-mapreduce}/CHANGES.txt
  8. +89 −0 hadoop-mapreduce/INSTALL
  9. 0  {mapreduce → hadoop-mapreduce}/LICENSE.txt
  10. 0  {mapreduce → hadoop-mapreduce}/NOTICE.txt
  11. +101 −0 hadoop-mapreduce/assembly/all.xml
  12. 0  {mapreduce → hadoop-mapreduce}/bin/mapred
  13. 0  {mapreduce → hadoop-mapreduce}/bin/mapred-config.sh
  14. 0  {mapreduce → hadoop-mapreduce}/bin/start-mapred.sh
  15. 0  {mapreduce → hadoop-mapreduce}/bin/stop-mapred.sh
  16. 0  {mapreduce → hadoop-mapreduce}/build-utils.xml
  17. 0  {mapreduce → hadoop-mapreduce}/build.xml
  18. 0  {mapreduce → hadoop-mapreduce}/conf/capacity-scheduler.xml.template
  19. 0  {mapreduce → hadoop-mapreduce}/conf/configuration.xsl
  20. 0  {mapreduce → hadoop-mapreduce}/conf/fair-scheduler.xml.template
  21. 0  {mapreduce → hadoop-mapreduce}/conf/mapred-queues.xml.template
  22. 0  {mapreduce → hadoop-mapreduce}/conf/mapred-site.xml.template
  23. 0  {mapreduce → hadoop-mapreduce}/conf/taskcontroller.cfg
  24. +103 −0 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/pom.xml
  25. +67 −0 ...uce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/CustomOutputCommitter.java
  26. +455 −0 ...op-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
  27. +264 −0 .../hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
  28. +65 −0 ...hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapTaskAttemptImpl.java
  29. +64 −0 ...oop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ReduceTaskAttemptImpl.java
  30. +434 −0 ...p-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
  31. +30 −0 ...educe/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedJvmID.java
  32. +15 −0 ...ent/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedPeriodicStatsAccumulator.java
  33. +52 −0 ...r-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedProgressSplitsBlock.java
  34. +346 −0 ...apreduce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
  35. +238 −0 ...ce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
  36. +42 −0 ...lient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEvent.java
  37. +675 −0 ...adoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
  38. +231 −0 ...-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
  39. +74 −0 ...oop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AMConstants.java
  40. +55 −0 ...doop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
  41. +576 −0 ...oop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
  42. +58 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
  43. +35 −0 ...lient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
  44. +137 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
  45. +28 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java
  46. +392 −0 ...nt/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  47. +59 −0 .../hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
  48. +58 −0 ...hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java
  49. +66 −0 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
  50. +42 −0 ...-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCounterUpdateEvent.java
  51. +36 −0 ...reduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobDiagnosticsUpdateEvent.java
  52. +41 −0 ...client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEvent.java
  53. +48 −0 ...nt/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java
  54. +42 −0 .../hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobFinishEvent.java
  55. +38 −0 ...educe-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobMapTaskRescheduledEvent.java
  56. +37 −0 ...uce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptCompletedEvent.java
  57. +48 −0 ...-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptFetchFailureEvent.java
  58. +43 −0 ...nt/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskEvent.java
  59. +39 −0 ...lient-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java
  60. +37 −0 ...lient-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptDiagnosticsUpdateEvent.java
  61. +41 −0 ...adoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java
  62. +55 −0 ...p-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java
  63. +61 −0 ...uce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
  64. +40 −0 ...lient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEvent.java
  65. +41 −0 ...t/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEventType.java
  66. +37 −0 ...doop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptEvent.java
  67. +1,416 −0 ...r-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  68. +97 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/MapTaskImpl.java
  69. +75 −0 ...t/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/ReduceTaskImpl.java
  70. +1,442 −0 .../hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  71. +887 −0 ...-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  72. +31 −0 ...adoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
  73. +114 −0 ...-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherEvent.java
  74. +279 −0 ...p-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
  75. +40 −0 ...reduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java
  76. +100 −0 ...op-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
  77. +183 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/MRAppMetrics.java
  78. +43 −0 ...t/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/ControlledClock.java
  79. +34 −0 ...r-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java
  80. +368 −0 ...t/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
  81. +32 −0 ...ent/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocator.java
  82. +38 −0 ...adoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocatorEvent.java
  83. +18 −0 ...t/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerFailedEvent.java
  84. +68 −0 .../hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestEvent.java
  85. +280 −0 ...-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
  86. +784 −0 ...t/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
  87. +274 −0 ...t/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
  88. +78 −0 .../hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DataStatistics.java
  89. +512 −0 ...doop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
  90. +195 −0 ...p/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java
  91. +150 −0 ...educe-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/LegacyTaskRuntimeEstimator.java
  92. +72 −0 ...mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/NullTaskRuntimesEngine.java
  93. +45 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/Speculator.java
  94. +86 −0 ...hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/SpeculatorEvent.java
  95. +213 −0 ...doop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/StartEndTimesBase.java
  96. +90 −0 ...p-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
  97. +39 −0 ...preduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskSpeculationPredicate.java
  98. +28 −0 ...ent/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleaner.java
  99. +108 −0 ...hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java
  100. +56 −0 ...adoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanupEvent.java
  101. +31 −0 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java
  102. +41 −0 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java
  103. +38 −0 ...doop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/App.java
  104. +169 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
  105. +59 −0 ...-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
  106. +76 −0 ...lient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AttemptsPage.java
  107. +161 −0 ...ient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
  108. +47 −0 ...lient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
  109. +34 −0 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
  110. +254 −0 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
  111. +41 −0 ...-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
  112. +92 −0 ...r-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
  113. +67 −0 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
  114. +124 −0 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
  115. +100 −0 ...-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
  116. +45 −0 ...r-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
  117. +1 −0  ...nt/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
  118. +423 −0 ...ce/hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
  119. +192 −0 ...-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
  120. +453 −0 ...hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
  121. +236 −0 ...hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
  122. +153 −0 ...r-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
  123. +221 −0 ...hadoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
  124. +201 −0 ...adoop-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
  125. +189 −0 ...lient/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java
  126. +506 −0 .../hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
  127. +202 −0 ...op-mr-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
  128. +779 −0 ...ent/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
  129. +136 −0 ...ient/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
  130. +121 −0 .../hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
  131. +73 −0 ...oop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java
  132. +136 −0 ...lient/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java
  133. +94 −0 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/pom.xml
  134. +153 −0 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/avro/MRClientProtocol.genavro
  135. +459 −0 ...adoop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
  136. +55 −0 ...doop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
  137. +39 −0 ...lient/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
  138. +268 −0 ...nt-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
  139. +218 −0 ...-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
  140. +9 −0 ...-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptRequest.java
  141. +5 −0 ...client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptResponse.java
  142. +10 −0 ...duce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersRequest.java
  143. +9 −0 ...uce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersResponse.java
  144. +9 −0 ...e-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsRequest.java
  145. +15 −0 ...-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsResponse.java
  146. +9 −0 ...uce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportRequest.java
  147. +9 −0 ...ce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportResponse.java
  148. +13 −0 ...src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsRequest.java
  149. +16 −0 ...rc/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsResponse.java
  150. +9 −0 ...nt-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportRequest.java
  151. +9 −0 ...t-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportResponse.java
  152. +9 −0 ...ce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportRequest.java
  153. +9 −0 ...e-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportResponse.java
  154. +13 −0 ...e-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsRequest.java
  155. +16 −0 ...-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsResponse.java
  156. +9 −0 ...apreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobRequest.java
  157. +5 −0 ...preduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobResponse.java
  158. +9 −0 ...-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptRequest.java
  159. +5 −0 ...client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptResponse.java
  160. +9 −0 ...preduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskRequest.java
  161. +5 −0 ...reduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskResponse.java
  162. +91 −0 .../src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptRequestPBImpl.java
  163. +37 −0 ...src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptResponsePBImpl.java
  164. +91 −0 ...mmon/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersRequestPBImpl.java
  165. +91 −0 ...mon/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersResponsePBImpl.java
  166. +91 −0 ...n/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsRequestPBImpl.java
  167. +120 −0 .../src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsResponsePBImpl.java
  168. +91 −0 ...mon/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportRequestPBImpl.java
  169. +91 −0 ...on/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportResponsePBImpl.java
  170. +113 −0 ...org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsRequestPBImpl.java
  171. +160 −0 ...rg/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsResponsePBImpl.java
  172. +91 −0 ...main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportRequestPBImpl.java
  173. +91 −0 ...ain/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportResponsePBImpl.java
  174. +91 −0 ...on/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportRequestPBImpl.java
  175. +91 −0 ...n/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportResponsePBImpl.java
  176. +120 −0 ...n/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsRequestPBImpl.java
  177. +160 −0 .../src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsResponsePBImpl.java
  178. +91 −0 ...t-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobRequestPBImpl.java
  179. +41 −0 ...-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobResponsePBImpl.java
  180. +91 −0 .../src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptRequestPBImpl.java
  181. +41 −0 ...src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptResponsePBImpl.java
  182. +91 −0 ...-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskRequestPBImpl.java
  183. +41 −0 ...common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskResponsePBImpl.java
  184. +11 −0 ...client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counter.java
  185. +19 −0 ...t/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/CounterGroup.java
  186. +16 −0 ...lient/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Counters.java
  187. +12 −0 ...r-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobId.java
  188. +21 −0 ...ient/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
  189. +30 −0 ...lient/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobState.java
  190. +5 −0 ...r-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/Phase.java
  191. +15 −0 ...duce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEvent.java
  192. +9 −0 ...lient-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptCompletionEventStatus.java
  193. +9 −0 .../hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptId.java
  194. +24 −0 ...oop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
  195. +17 −0 ...doop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptState.java
  196. +11 −0 ...-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskId.java
  197. +42 −0 ...ent/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskReport.java
  198. +5 −0 ...ient/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskState.java
  199. +23 −0 ...lient/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskType.java
  200. +189 −0 ...duce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterGroupPBImpl.java
  201. +89 −0 ...mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CounterPBImpl.java
  202. +179 −0 ...apreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/CountersPBImpl.java
  203. +141 −0 ...p-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobIdPBImpl.java
  204. +185 −0 ...preduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
  205. +159 −0 ...mmon/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptCompletionEventPBImpl.java
  206. +124 −0 ...uce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptIdPBImpl.java
  207. +250 −0 ...client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
  208. +169 −0 ...-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskIdPBImpl.java
  209. +376 −0 ...reduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskReportPBImpl.java
  210. +249 −0 ...oop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
  211. +99 −0 ...client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHConfig.java
  212. +499 −0 ...hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
  213. +110 −0 ...nt/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobIndexInfo.java
  214. +62 −0 ...reduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
  215. +224 −0 ...doop-mr-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
  216. +82 −0 ...r-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRProtoUtils.java
  217. +20 −0 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
  218. +150 −0 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
  219. +83 −0 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
  220. +184 −0 ...mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
  221. +37 −0 ...r-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRecordFactory.java
  222. +110 −0 ...-mr-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
  223. +46 −0 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/pom.xml
  224. 0  ...jobhistory → hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/avro}/Events.avpr
  225. 0  ...oop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/filecache/DistributedCache.java
  226. 0  .../hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/filecache/package-info.java
  227. 0  ...duce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/AuditLogger.java
  228. +619 −0 ...educe/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
  229. 0  ...oop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/BasicTypeSorterBase.java
  230. 0  ...uce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/BufferSorter.java
  231. 0  ...uce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/CleanupQueue.java
  232. 0  ...-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/Clock.java
  233. 0  ...ce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/ClusterStatus.java
  234. +27 −0 ...preduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Constants.java
  235. 0  ...preduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/Counters.java
  236. 0  ...mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/CumulativePeriodicStats.java
  237. 0  ...adoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/DeprecatedQueueConfigurationParser.java
  238. 0  ...client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/FileAlreadyExistsException.java
  239. 0  .../hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/FileInputFormat.java
  240. 0  ...oop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/FileOutputCommitter.java
  241. 0  ...hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/FileOutputFormat.java
  242. 0  ...reduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/FileSplit.java
  243. 0  ...oop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/ID.java
  244. 0  ...-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/IFile.java
  245. 0  ...hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/IFileInputStream.java
  246. 0  ...adoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/IFileOutputStream.java
  247. 0  ...educe/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/IndexCache.java
  248. 0  ...duce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/InputFormat.java
  249. 0  ...educe/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/InputSplit.java
  250. 0  ...r-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/InvalidFileTypeException.java
  251. 0  ...p-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/InvalidInputException.java
  252. 0  ...mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/InvalidJobConfException.java
  253. 0  ...-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JVMId.java
  254. +105 −0 ...ce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobACLsManager.java
  255. 0  ...reduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobClient.java
  256. 0  ...apreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobConf.java
  257. 0  .../hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobConfigurable.java
  258. 0  ...educe/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobContext.java
  259. 0  ...e/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobContextImpl.java
  260. 0  ...e/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobEndNotifier.java
  261. 0  ...-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobID.java
  262. 0  ...apreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobInfo.java
  263. 0  ...duce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobPriority.java
  264. 0  ...educe/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobProfile.java
  265. 0  ...uce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobQueueInfo.java
  266. 0  ...reduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JobStatus.java
  267. 0  ...educe/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JvmContext.java
  268. 0  ...apreduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/JvmTask.java
  269. 0  ...r-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java
  270. 0  ...mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/KeyValueTextInputFormat.java
  271. 0  ...hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/LineRecordReader.java
  272. 0  ...duce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/MRConstants.java
  273. +226 −0 ...uce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
  274. 0  ...oop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/MapFileOutputFormat.java
  275. 0  ...ce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/MapOutputFile.java
  276. 0  ...ce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/MapReduceBase.java
  277. 0  ...duce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/MapRunnable.java
  278. 0  ...reduce/hadoop-mr-client/hadoop-mapreduce-client-core/src/main}/java/org/apache/hadoop/mapred/MapRunner.java
Sorry, we could not display the entire diff because too many files (4,756) changed.
0  ...eclipse.templates/.launches/AllMapredTests.launch → ...eclipse.templates/.launches/AllMapredTests.launch
File renamed without changes
0  ...ce/.eclipse.templates/.launches/JobTracker.launch → ...ce/.eclipse.templates/.launches/JobTracker.launch
File renamed without changes
0  ...e.templates/.launches/SpecificTestTemplate.launch → ...e.templates/.launches/SpecificTestTemplate.launch
File renamed without changes
0  ...e/.eclipse.templates/.launches/TaskTracker.launch → ...e/.eclipse.templates/.launches/TaskTracker.launch
File renamed without changes
0  mapreduce/.eclipse.templates/README.txt → hadoop-mapreduce/.eclipse.templates/README.txt
File renamed without changes
0  mapreduce/.gitignore → hadoop-mapreduce/.gitignore
File renamed without changes
0  mapreduce/CHANGES.txt → hadoop-mapreduce/CHANGES.txt
File renamed without changes
89 hadoop-mapreduce/INSTALL
@@ -0,0 +1,89 @@
+To compile Hadoop Mapreduce next, do the following:
+
+Step 1) Install dependencies for yarn
+
+See http://svn.apache.org/repos/asf/hadoop/common/branches/MR-279/mapreduce/yarn/README
+Make sure the protobuf library is in your library path, or set: export LD_LIBRARY_PATH=/usr/local/lib
+
+Step 2) Checkout
+
+svn checkout http://svn.apache.org/repos/asf/hadoop/common/branches/MR-279/
+
+Step 3) Build common
+
+Go to common directory
+ant veryclean mvn-install
+
+Step 4) Build HDFS
+
+Go to hdfs directory
+ant veryclean mvn-install -Dresolvers=internal
+
+Step 5) Build yarn and mapreduce
+
+Go to mapreduce directory
+export MAVEN_OPTS=-Xmx512m
+
+mvn clean install assembly:assembly
+ant veryclean jar jar-test -Dresolvers=internal
+
+In case you want to skip the tests, run:
+
+mvn clean install assembly:assembly -DskipTests
+ant veryclean jar jar-test -Dresolvers=internal
+
+You will see a tarball in:
+ls target/hadoop-mapreduce-1.0-SNAPSHOT-bin.tar.gz
+
+Step 6) Untar the tarball in a clean, separate directory, say HADOOP_YARN_INSTALL.
+
+To run Hadoop Mapreduce next applications:
+
+Step 7) cd $HADOOP_YARN_INSTALL
+
+Step 8) export the following variables:
+
+HADOOP_MAPRED_HOME=
+HADOOP_COMMON_HOME=
+HADOOP_HDFS_HOME=
+YARN_HOME=directory where you untarred yarn
+HADOOP_CONF_DIR=
+YARN_CONF_DIR=$HADOOP_CONF_DIR
+
+Step 9) bin/yarn-daemon.sh start resourcemanager
+
+Step 10) bin/yarn-daemon.sh start nodemanager
+
+Step 11) bin/yarn-daemon.sh start historyserver
+
+Step 12) Create the following symlinks in hadoop-common/lib
+
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-app-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/yarn-api-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-common-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/yarn-common-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-core-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/yarn-server-common-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/modules/hadoop-mapreduce-client-jobclient-1.0-SNAPSHOT.jar .
+ln -s $HADOOP_YARN_INSTALL/lib/protobuf-java-2.4.0a.jar .
+
+Step 13) The Yarn daemons are now up! But to run mapreduce applications, which now live in user land, you need to set up the nodemanager with the following configuration in your yarn-site.xml before you start the nodemanager.
+ <property>
+ <name>nodemanager.auxiluary.services</name>
+ <value>mapreduce.shuffle</value>
+ </property>
+
+ <property>
+ <name>nodemanager.aux.service.mapreduce.shuffle.class</name>
+ <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+ </property>
+
+Step 14) You are all set. An example of how to run a mapreduce job:
+
+cd $HADOOP_MAPRED_HOME
+ant examples -Dresolvers=internal
+$HADOOP_COMMON_HOME/bin/hadoop jar $HADOOP_MAPRED_HOME/build/hadoop-mapred-examples-0.22.0-SNAPSHOT.jar randomwriter -Dmapreduce.job.user.name=$USER -Dmapreduce.clientfactory.class.name=org.apache.hadoop.mapred.YarnClientFactory -Dmapreduce.randomwriter.bytespermap=10000 -Ddfs.blocksize=536870912 -Ddfs.block.size=536870912 -libjars $HADOOP_YARN_INSTALL/hadoop-mapreduce-1.0-SNAPSHOT/modules/hadoop-mapreduce-client-jobclient-1.0-SNAPSHOT.jar output
+
+The output on the command line should be similar to what you see in a JT/TT setup (Hadoop 0.20/0.21).
+
0  mapreduce/LICENSE.txt → hadoop-mapreduce/LICENSE.txt
File renamed without changes
0  mapreduce/NOTICE.txt → hadoop-mapreduce/NOTICE.txt
File renamed without changes
101 hadoop-mapreduce/assembly/all.xml
@@ -0,0 +1,101 @@
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+ <id>all</id>
+ <formats>
+ <format>tar.gz</format>
+ </formats>
+ <includeBaseDirectory>true</includeBaseDirectory>
+ <!-- TODO: this layout is wrong. We need module specific bin files in module specific dirs -->
+ <fileSets>
+ <fileSet>
+ <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/target/classes/bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <includes>
+ <include>container-executor</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ <fileSet>
+ <directory>hadoop-yarn/bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ <fileSet>
+ <directory>bin</directory>
+ <outputDirectory>bin</outputDirectory>
+ <includes>
+ <include>*</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ <fileSet>
+ <directory>hadoop-yarn/conf</directory>
+ <outputDirectory>conf</outputDirectory>
+ <includes>
+ <include>**/*</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <outputDirectory>sources</outputDirectory>
+ <excludes>
+ <exclude>**/*.jar</exclude>
+ <exclude>**/target/**</exclude>
+ <!-- scripts to include later for setting fileMode -->
+ <exclude>**/bin/*</exclude>
+ <exclude>**/scripts/*</exclude>
+ <!-- images that we don't need (and cause problems for our tools) -->
+ <exclude>**/dt-*/images/**</exclude>
+ <!-- until the code that does this is fixed -->
+ <exclude>**/file:/**</exclude>
+ <exclude>**/SecurityAuth.audit*</exclude>
+ </excludes>
+ <includes>
+ <include>assembly/**</include>
+ <include>pom.xml</include>
+ <include>build*.xml</include>
+ <include>ivy.xml</include>
+ <include>ivy/**</include>
+ <include>INSTALL</include>
+ <include>LICENSE.txt</include>
+ <include>mr-client/**</include>
+ <include>hadoop-yarn/**</include>
+ <include>src/**</include>
+ </includes>
+ </fileSet>
+ <fileSet>
+ <outputDirectory>sources</outputDirectory>
+ <includes>
+ <include>**/bin/*</include>
+ <include>**/scripts/*</include>
+ </includes>
+ <fileMode>0755</fileMode>
+ </fileSet>
+ </fileSets>
+ <moduleSets>
+ <moduleSet>
+ <excludes>
+ <exclude>org.apache.hadoop:hadoop-yarn-server-tests</exclude>
+ </excludes>
+ <binaries>
+ <outputDirectory>modules</outputDirectory>
+ <includeDependencies>false</includeDependencies>
+ <unpack>false</unpack>
+ </binaries>
+ </moduleSet>
+ </moduleSets>
+ <dependencySets>
+ <dependencySet>
+ <useProjectArtifact>false</useProjectArtifact>
+ <outputDirectory>/lib</outputDirectory>
+ <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+ <excludes>
+ <exclude>org.apache.hadoop:hadoop-common</exclude>
+ <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+ </excludes>
+ </dependencySet>
+ </dependencySets>
+</assembly>
0  mapreduce/bin/mapred → hadoop-mapreduce/bin/mapred
File renamed without changes
0  mapreduce/bin/mapred-config.sh → hadoop-mapreduce/bin/mapred-config.sh
File renamed without changes
0  mapreduce/bin/start-mapred.sh → hadoop-mapreduce/bin/start-mapred.sh
File renamed without changes
0  mapreduce/bin/stop-mapred.sh → hadoop-mapreduce/bin/stop-mapred.sh
File renamed without changes
0  mapreduce/build-utils.xml → hadoop-mapreduce/build-utils.xml
File renamed without changes
0  mapreduce/build.xml → hadoop-mapreduce/build.xml
File renamed without changes
0  mapreduce/conf/capacity-scheduler.xml.template → ...op-mapreduce/conf/capacity-scheduler.xml.template
File renamed without changes
0  mapreduce/conf/configuration.xsl → hadoop-mapreduce/conf/configuration.xsl
File renamed without changes
0  mapreduce/conf/fair-scheduler.xml.template → hadoop-mapreduce/conf/fair-scheduler.xml.template
File renamed without changes
0  mapreduce/conf/mapred-queues.xml.template → hadoop-mapreduce/conf/mapred-queues.xml.template
File renamed without changes
0  mapreduce/conf/mapred-site.xml.template → hadoop-mapreduce/conf/mapred-site.xml.template
File renamed without changes
0  mapreduce/conf/taskcontroller.cfg → hadoop-mapreduce/conf/taskcontroller.cfg
File renamed without changes
103 hadoop-mapreduce/hadoop-mr-client/hadoop-mapreduce-client-app/pom.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0"?>
+<project>
+ <parent>
+ <artifactId>hadoop-mapreduce-client</artifactId>
+ <groupId>org.apache.hadoop</groupId>
+ <version>${hadoop-mapreduce.version}</version>
+ </parent>
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-app</artifactId>
+ <name>hadoop-mapreduce-client-app</name>
+
+ <properties>
+ <install.file>${project.artifact.file}</install.file>
+ <applink.base>${project.build.directory}/${project.name}</applink.base>
+ <mr.basedir>${project.parent.parent.basedir}</mr.basedir>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-common</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-common</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <!-- local name for links -->
+ <finalName>mr-app</finalName>
+ <plugins>
+ <plugin>
+ <artifactId>maven-jar-plugin</artifactId>
+ <executions>
+ <execution>
+ <goals>
+ <goal>test-jar</goal>
+ </goals>
+ <phase>test-compile</phase>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-dependency-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>build-classpath</id>
+ <phase>generate-sources</phase>
+ <goals>
+ <goal>build-classpath</goal>
+ </goals>
+ <configuration>
+ <outputFile>target/classes/mrapp-generated-classpath</outputFile>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ <plugin>
+ <artifactId>maven-antrun-plugin</artifactId>
+ <executions>
+ <execution>
+ <id>create-mr-app-symlinks</id>
+ <phase>package</phase>
+ <configuration>
+ <target>
+ <symlink link="${applink.base}.jar"
+ resource="mr-app.jar" failonerror="false"/>
+ <symlink link="${applink.base}-1.0-SNAPSHOT.jar"
+ resource="mr-app.jar" failonerror="false"/>
+ </target>
+ </configuration>
+ <goals>
+ <goal>run</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+</project>
67 .../hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/CustomOutputCommitter.java
@@ -0,0 +1,67 @@
+package org.apache.hadoop;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobContext;
+import org.apache.hadoop.mapred.OutputCommitter;
+import org.apache.hadoop.mapred.TaskAttemptContext;
+
+public class CustomOutputCommitter extends OutputCommitter {
+
+ public static final String JOB_SETUP_FILE_NAME = "_job_setup";
+ public static final String JOB_COMMIT_FILE_NAME = "_job_commit";
+ public static final String JOB_ABORT_FILE_NAME = "_job_abort";
+ public static final String TASK_SETUP_FILE_NAME = "_task_setup";
+ public static final String TASK_ABORT_FILE_NAME = "_task_abort";
+ public static final String TASK_COMMIT_FILE_NAME = "_task_commit";
+
+ @Override
+ public void setupJob(JobContext jobContext) throws IOException {
+ writeFile(jobContext.getJobConf(), JOB_SETUP_FILE_NAME);
+ }
+
+ @Override
+ public void commitJob(JobContext jobContext) throws IOException {
+ super.commitJob(jobContext);
+ writeFile(jobContext.getJobConf(), JOB_COMMIT_FILE_NAME);
+ }
+
+ @Override
+ public void abortJob(JobContext jobContext, int status)
+ throws IOException {
+ super.abortJob(jobContext, status);
+ writeFile(jobContext.getJobConf(), JOB_ABORT_FILE_NAME);
+ }
+
+ @Override
+ public void setupTask(TaskAttemptContext taskContext) throws IOException {
+ writeFile(taskContext.getJobConf(), TASK_SETUP_FILE_NAME);
+ }
+
+ @Override
+ public boolean needsTaskCommit(TaskAttemptContext taskContext)
+ throws IOException {
+ return true;
+ }
+
+ @Override
+ public void commitTask(TaskAttemptContext taskContext) throws IOException {
+ writeFile(taskContext.getJobConf(), TASK_COMMIT_FILE_NAME);
+ }
+
+ @Override
+ public void abortTask(TaskAttemptContext taskContext) throws IOException {
+ writeFile(taskContext.getJobConf(), TASK_ABORT_FILE_NAME);
+ }
+
+ private void writeFile(JobConf conf, String filename) throws IOException {
+ System.out.println("writing file ----" + filename);
+ Path outputPath = FileOutputFormat.getOutputPath(conf);
+ FileSystem fs = outputPath.getFileSystem(conf);
+ fs.create(new Path(outputPath, filename)).close();
+ }
+}
455 ...mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -0,0 +1,455 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.URI;
+import java.util.HashSet;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSError;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.service.AbstractService;
+
+/**
+ * Runs the container task locally in a thread.
+ * Since all (sub)tasks share the same local directory, they must be executed
+ * sequentially in order to avoid creating/deleting the same files/dirs.
+ */
+public class LocalContainerLauncher extends AbstractService implements
+ ContainerLauncher {
+
+ private static final File curDir = new File(".");
+ private static final Log LOG = LogFactory.getLog(LocalContainerLauncher.class);
+
+ private FileContext curFC = null;
+ private final HashSet<File> localizedFiles;
+ private final AppContext context;
+ private final TaskUmbilicalProtocol umbilical;
+ private Thread eventHandlingThread;
+ private BlockingQueue<ContainerLauncherEvent> eventQueue =
+ new LinkedBlockingQueue<ContainerLauncherEvent>();
+
+ public LocalContainerLauncher(AppContext context,
+ TaskUmbilicalProtocol umbilical) {
+ super(LocalContainerLauncher.class.getName());
+ this.context = context;
+ this.umbilical = umbilical;
+ // umbilical: MRAppMaster creates (taskAttemptListener), passes to us (TODO/FIXME: pointless to use RPC to talk to self; should create LocalTaskAttemptListener or similar: implement umbilical protocol but skip RPC stuff)
+
+ try {
+ curFC = FileContext.getFileContext(curDir.toURI());
+ } catch (UnsupportedFileSystemException ufse) {
+ LOG.error("Local filesystem " + curDir.toURI().toString()
+ + " is unsupported?? (should never happen)");
+ }
+
+ // Save list of files/dirs that are supposed to be present so can delete
+ // any extras created by one task before starting subsequent task. Note
+ // that there's no protection against deleted or renamed localization;
+ // users who do that get what they deserve (and will have to disable
+ // uberization in order to run correctly).
+ File[] curLocalFiles = curDir.listFiles();
+ localizedFiles = new HashSet<File>(curLocalFiles.length);
+ for (int j = 0; j < curLocalFiles.length; ++j) {
+ localizedFiles.add(curLocalFiles[j]);
+ }
+
+ // Relocalization note/future FIXME (per chrisdo, 20110315): At moment,
+ // full localization info is in AppSubmissionContext passed from client to
+ // RM and then to NM for AM-container launch: no difference between AM-
+ // localization and MapTask- or ReduceTask-localization, so can assume all
+ // OK. Longer-term, will need to override uber-AM container-localization
+ // request ("needed resources") with union of regular-AM-resources + task-
+ // resources (and, if maps and reduces ever differ, then union of all three
+ // types), OR will need localizer service/API that uber-AM can request
+ // after running (e.g., "localizeForTask()" or "localizeForMapTask()").
+ }
+
+ public void start() {
+ eventHandlingThread = new Thread(new SubtaskRunner(), "uber-SubtaskRunner");
+ eventHandlingThread.start();
+ super.start();
+ }
+
+ public void stop() {
+ eventHandlingThread.interrupt();
+ super.stop();
+ }
+
+ @Override
+ public void handle(ContainerLauncherEvent event) {
+ try {
+ eventQueue.put(event);
+ } catch (InterruptedException e) {
+ throw new YarnException(e); // FIXME? YarnException is "for runtime exceptions only"
+ }
+ }
+
+
+ /*
+ * Uber-AM lifecycle/ordering ("normal" case):
+ *
+ * - [somebody] sends TA_ASSIGNED
+ * - handled by ContainerAssignedTransition (TaskAttemptImpl.java)
+ * - creates "remoteTask" for us == real Task
+ * - sends CONTAINER_REMOTE_LAUNCH
+ * - TA: UNASSIGNED -> ASSIGNED
+ * - CONTAINER_REMOTE_LAUNCH handled by LocalContainerLauncher (us)
+ * - sucks "remoteTask" out of TaskAttemptImpl via getRemoteTask()
+ * - sends TA_CONTAINER_LAUNCHED
+ * [[ elsewhere...
+ * - TA_CONTAINER_LAUNCHED handled by LaunchedContainerTransition
+ * - registers "remoteTask" with TaskAttemptListener (== umbilical)
+ * - NUKES "remoteTask"
+ * - sends T_ATTEMPT_LAUNCHED (Task: SCHEDULED -> RUNNING)
+ * - TA: ASSIGNED -> RUNNING
+ * ]]
+ * - runs Task (runSubMap() or runSubReduce())
+ * - TA can safely send TA_UPDATE since in RUNNING state
+ * [modulo possible TA-state-machine race noted below: CHECK (TODO)]
+ */
+ private class SubtaskRunner implements Runnable {
+
+ private boolean doneWithMaps = false;
+ private int finishedSubMaps = 0;
+
+ SubtaskRunner() {
+ }
+
+ @Override
+ public void run() {
+ ContainerLauncherEvent event = null;
+
+ // _must_ either run subtasks sequentially or accept expense of new JVMs
+ // (i.e., fork()), else will get weird failures when maps try to create/
+ // write same dirname or filename: no chdir() in Java
+ while (!Thread.currentThread().isInterrupted()) {
+ try {
+ event = eventQueue.take();
+ } catch (InterruptedException e) { // mostly via T_KILL? JOB_KILL?
+ LOG.error("Returning, interrupted : " + e);
+ return;
+ }
+
+ LOG.info("Processing the event " + event.toString());
+
+ if (event.getType() == EventType.CONTAINER_REMOTE_LAUNCH) {
+
+ ContainerRemoteLaunchEvent launchEv =
+ (ContainerRemoteLaunchEvent)event;
+ TaskAttemptId attemptID = launchEv.getTaskAttemptID(); //FIXME: can attemptID ever be null? (only if retrieved over umbilical?)
+
+ Job job = context.getAllJobs().get(attemptID.getTaskId().getJobId());
+ int numMapTasks = job.getTotalMaps();
+ int numReduceTasks = job.getTotalReduces();
+
+ // YARN (tracking) Task:
+ org.apache.hadoop.mapreduce.v2.app.job.Task ytask =
+ job.getTask(attemptID.getTaskId());
+ // classic mapred Task:
+ org.apache.hadoop.mapred.Task remoteTask = launchEv.getRemoteTask();
+
+ // after "launching," send launched event to task attempt to move
+ // state from ASSIGNED to RUNNING (also nukes "remoteTask", so must
+ // do getRemoteTask() call first)
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCHED)); //FIXME: race condition here? or do we have same kind of lock on TA handler => MapTask can't send TA_UPDATE before TA_CONTAINER_LAUNCHED moves TA to RUNNING state? (probably latter)
+
+ if (numMapTasks == 0) {
+ doneWithMaps = true;
+ }
+
+ try {
+ if (remoteTask.isMapOrReduce()) {
+ JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
+ if (remoteTask.isMapTask()) {
+ jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
+ } else {
+ jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
+ }
+ context.getEventHandler().handle(jce);
+ }
+ runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks,
+ (numReduceTasks > 0));
+
+ } catch (RuntimeException re) {
+ JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
+ jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
+ context.getEventHandler().handle(jce);
+ // this is our signal that the subtask failed in some way, so
+ // simulate a failed JVM/container and send a container-completed
+ // event to task attempt (i.e., move state machine from RUNNING
+ // to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])
+ context.getEventHandler().handle(new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_CONTAINER_COMPLETED));
+ } catch (IOException ioe) {
+ // if umbilical itself barfs (in error-handler of runSubMap()),
+ // we're pretty much hosed, so do what YarnChild main() does
+ // (i.e., exit clumsily--but can never happen, so no worries!)
+ LOG.fatal("oopsie... this can never happen: "
+ + StringUtils.stringifyException(ioe));
+ System.exit(-1);
+ }
+
+ } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
+
+ // no container to kill, so just send "cleaned" event to task attempt
+ // to move us from SUCCESS_CONTAINER_CLEANUP to SUCCEEDED state
+ // (or {FAIL|KILL}_CONTAINER_CLEANUP to {FAIL|KILL}_TASK_CLEANUP)
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(event.getTaskAttemptID(),
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+
+ } else {
+ LOG.warn("Ignoring unexpected event " + event.toString());
+ }
+
+ }
+ }
+
+ private void runSubtask(org.apache.hadoop.mapred.Task task,
+ final TaskType taskType,
+ TaskAttemptId attemptID,
+ final int numMapTasks,
+ boolean renameOutputs)
+ throws RuntimeException, IOException {
+ org.apache.hadoop.mapred.TaskAttemptID classicAttemptID =
+ TypeConverter.fromYarn(attemptID);
+
+ try {
+ JobConf conf = new JobConf(getConfig());
+
+ // mark this as an uberized subtask so it can set task counter
+ // (longer-term/FIXME: could redefine as job counter and send
+ // "JobCounterEvent" to JobImpl on [successful] completion of subtask;
+ // will need new Job state-machine transition and JobImpl jobCounters
+ // map to handle)
+ conf.setBoolean("mapreduce.task.uberized", true);
+
+ // META-FIXME: do we want the extra sanity-checking (doneWithMaps,
+ // etc.), or just assume/hope the state machine(s) and uber-AM work
+ // as expected?
+ if (taskType == TaskType.MAP) {
+ if (doneWithMaps) {
+ LOG.error("CONTAINER_REMOTE_LAUNCH contains a map task ("
+ + attemptID + "), but should be finished with maps");
+ // throw new RuntimeException() (FIXME: what's appropriate here?)
+ }
+
+ MapTask map = (MapTask)task;
+
+ //CODE-REVIEWER QUESTION: why not task.getConf() or map.getConf() instead of conf? do we need Task's localizeConfiguration() run on this first?
+ map.run(conf, umbilical);
+
+ if (renameOutputs) {
+ renameMapOutputForReduce(conf, attemptID, map.getMapOutputFile());
+ }
+ relocalize();
+
+ if (++finishedSubMaps == numMapTasks) {
+ doneWithMaps = true;
+ }
+
+ } else /* TaskType.REDUCE */ {
+
+ if (!doneWithMaps) {
+ //check if event-queue empty? whole idea of counting maps vs. checking event queue is a tad wacky...but could enforce ordering (assuming no "lost events") at LocalMRAppMaster [CURRENT BUG(?): doesn't send reduce event until maps all done]
+ LOG.error("CONTAINER_REMOTE_LAUNCH contains a reduce task ("
+ + attemptID + "), but not yet finished with maps");
+ // throw new RuntimeException() (FIXME) // or push reduce event back onto end of queue? (probably former)
+ }
+
+ ReduceTask reduce = (ReduceTask)task;
+
+ // a.k.a. "mapreduce.jobtracker.address" in LocalJobRunner:
+ conf.set(MRConfig.MASTER_ADDRESS, "local"); // bypass shuffle
+
+ reduce.run(conf, umbilical);
+ //relocalize(); // needed only if more than one reducer supported (is MAPREDUCE-434 fixed yet?)
+ }
+
+ } catch (FSError e) {
+ LOG.fatal("FSError from child", e);
+ // umbilical: MRAppMaster creates (taskAttemptListener), passes to us
+ umbilical.fsError(classicAttemptID, e.getMessage());
+ throw new RuntimeException();
+
+ } catch (Exception exception) {
+ LOG.warn("Exception running local (uberized) 'child' : "
+ + StringUtils.stringifyException(exception));
+ try {
+ if (task != null) {
+ // do cleanup for the task
+// if (childUGI == null) { // no need to jump into doAs block
+ task.taskCleanup(umbilical);
+// } else {
+// final Task taskFinal = task;
+// childUGI.doAs(new PrivilegedExceptionAction<Object>() {
+// @Override
+// public Object run() throws Exception {
+// taskFinal.taskCleanup(umbilical);
+// return null;
+// }
+// });
+// }
+ }
+ } catch (Exception e) {
+ LOG.info("Exception cleaning up: "
+ + StringUtils.stringifyException(e));
+ }
+ // Report back any failures, for diagnostic purposes
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ exception.printStackTrace(new PrintStream(baos));
+// if (classicAttemptID != null) {
+ umbilical.reportDiagnosticInfo(classicAttemptID, baos.toString());
+// }
+ throw new RuntimeException();
+
+ } catch (Throwable throwable) {
+ LOG.fatal("Error running local (uberized) 'child' : "
+ + StringUtils.stringifyException(throwable));
+// if (classicAttemptID != null) {
+ Throwable tCause = throwable.getCause();
+ String cause = (tCause == null)
+ ? throwable.getMessage()
+ : StringUtils.stringifyException(tCause);
+ umbilical.fatalError(classicAttemptID, cause);
+// }
+ throw new RuntimeException();
+
+ } finally {
+/*
+FIXME: do we need to do any of this stuff? (guessing not since not in own JVM)
+ RPC.stopProxy(umbilical);
+ DefaultMetricsSystem.shutdown();
+ // Shutting down log4j of the child-vm...
+ // This assumes that on return from Task.run()
+ // there is no more logging done.
+ LogManager.shutdown();
+ */
+ }
+ }
+
+
+/* FIXME: may not need renameMapOutputForReduce() anymore? TEST!
+
+${local.dir}/usercache/$user/appcache/$appId/$contId/ == $cwd for tasks;
+contains task.sh script, which, when executed, creates symlinks and sets up env
+ "$local.dir"/usercache/$user/appcache/$appId/$contId/file.out
+ "$local.dir"/usercache/$user/appcache/$appId/$contId/file.out.idx (?)
+ "$local.dir"/usercache/$user/appcache/$appId/output/$taskId/ is where file.out* is moved after MapTask done
+
+ OHO! no further need for this at all? $taskId is unique per subtask
+ now => should work fine to leave alone. TODO: test with teragen or
+ similar
+ */
+
+ /**
+ * Within the _local_ filesystem (not HDFS), all activity takes place within
+ * a single subdir (${local.dir}/usercache/$user/appcache/$appId/$contId/),
+ * and all sub-MapTasks create the same filename ("file.out"). Rename that
+ * to something unique (e.g., "map_0.out") to avoid collisions.
+ *
+ * Longer-term, we'll modify [something] to use TaskAttemptID-based
+ * filenames instead of "file.out". (All of this is entirely internal,
+ * so there are no particular compatibility issues.)
+ */
+ private void renameMapOutputForReduce(JobConf conf, TaskAttemptId mapId,
+ MapOutputFile subMapOutputFile)
+ throws IOException {
+ FileSystem localFs = FileSystem.getLocal(conf);
+ // move map output to reduce input
+ Path mapOut = subMapOutputFile.getOutputFile();
+ Path reduceIn = subMapOutputFile.getInputFileForWrite(
+ TypeConverter.fromYarn(mapId).getTaskID(), localFs.getLength(mapOut));
+ if (!localFs.mkdirs(reduceIn.getParent())) {
+ throw new IOException("Mkdirs failed to create "
+ + reduceIn.getParent().toString());
+ }
+ if (!localFs.rename(mapOut, reduceIn))
+ throw new IOException("Couldn't rename " + mapOut);
+ }
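
To see why the rename is needed: in the uber-AM all sub-maps share one working directory and each writes the same "file.out", so each output must get a unique name before the next map runs. A minimal stand-alone illustration of that collision (paths and names hypothetical; the real code above uses MapOutputFile and the local FileSystem):

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

public class RenameSketch {
  public static void main(String[] args) throws IOException {
    File cwd = new File(System.getProperty("java.io.tmpdir"), "uber-container");
    cwd.mkdirs();
    for (int mapId = 0; mapId < 2; mapId++) {
      File mapOut = new File(cwd, "file.out"); // every sub-map uses this name
      FileWriter w = new FileWriter(mapOut);
      w.write("output of map " + mapId);
      w.close();
      // without the rename, map 1 would clobber map 0's output:
      if (!mapOut.renameTo(new File(cwd, "map_" + mapId + ".out"))) {
        throw new IOException("couldn't rename " + mapOut);
      }
    }
    System.out.println(cwd.listFiles().length + " distinct reduce inputs");
  }
}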
+
+ /**
+ * Also within the local filesystem, we need to restore the initial state
+ * of the directory as much as possible. Compare current contents against
+ * the saved original state and nuke everything that doesn't belong, with
+ * the exception of the renamed map outputs (see above).
+ * FIXME: do we really need to worry about renamed map outputs, or already moved to output dir on commit? if latter, fix comment
+ *
+ * Any jobs that go out of their way to rename or delete things from the
+ * local directory are considered broken and deserve what they get...
+ */
+ private void relocalize() {
+ File[] curLocalFiles = curDir.listFiles();
+ for (int j = 0; j < curLocalFiles.length; ++j) {
+ if (!localizedFiles.contains(curLocalFiles[j])) {
+ // found one that wasn't there before: delete it
+ boolean deleted = false;
+ try {
+ if (curFC != null) {
+ // this is recursive, unlike File delete():
+ deleted = curFC.delete(new Path(curLocalFiles[j].getName()),true);
+ }
+ } catch (IOException e) {
+ deleted = false;
+ }
+ if (!deleted) {
+ LOG.warn("Unable to delete unexpected local file/dir "
+ + curLocalFiles[j].getName() + ": insufficient permissions?");
+ }
+ }
+ }
+ }
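
The same snapshot-and-sweep idea in miniature: record the directory contents up front, then delete anything that appears afterwards. A stand-alone sketch under that assumption (hypothetical names; note that, unlike curFC.delete() above, plain File.delete() is not recursive):

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class RelocalizeSketch {
  public static void main(String[] args) throws IOException {
    File dir = new File(System.getProperty("java.io.tmpdir"), "relocalize-sketch");
    dir.mkdirs();
    // snapshot the localized contents before running a subtask
    Set<File> localized = new HashSet<File>(Arrays.asList(dir.listFiles()));
    new File(dir, "stray.tmp").createNewFile(); // a subtask side effect
    // sweep: anything not in the snapshot gets deleted
    for (File f : dir.listFiles()) {
      if (!localized.contains(f) && !f.delete()) {
        System.err.println("unable to delete unexpected file " + f);
      }
    }
  }
}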
+
+ } // end SubtaskRunner
+
+}
264 ...doop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -0,0 +1,264 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Vector;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapred.TaskLog.LogName;
+import org.apache.hadoop.mapreduce.ID;
+import org.apache.hadoop.util.StringUtils;
+
+public class MapReduceChildJVM {
+ private static final String SYSTEM_PATH_SEPARATOR =
+ System.getProperty("path.separator");
+
+ private static final Log LOG = LogFactory.getLog(MapReduceChildJVM.class);
+
+ private static File getTaskLogFile(String logDir, LogName filter) {
+ return new File(logDir, filter.toString());
+ }
+
+ private static String getChildEnv(JobConf jobConf, boolean isMap) {
+ if (isMap) {
+ return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV,
+ jobConf.get(JobConf.MAPRED_TASK_ENV));
+ }
+ return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV,
+ jobConf.get(JobConf.MAPRED_TASK_ENV));
+ }
+
+ public static void setVMEnv(Map<String, String> env,
+ List<String> classPaths, String pwd, String containerLogDir,
+ String nmLdLibraryPath, Task task, CharSequence applicationTokensFile) {
+
+ JobConf conf = task.conf;
+
+ // Add classpath.
+ CharSequence cp = env.get("CLASSPATH");
+ String classpath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
+ if (null == cp) {
+ env.put("CLASSPATH", classpath);
+ } else {
+ env.put("CLASSPATH", classpath + SYSTEM_PATH_SEPARATOR + cp);
+ }
+
+ /////// Environment variable LD_LIBRARY_PATH
+ StringBuilder ldLibraryPath = new StringBuilder();
+
+ ldLibraryPath.append(nmLdLibraryPath);
+ ldLibraryPath.append(SYSTEM_PATH_SEPARATOR);
+ ldLibraryPath.append(pwd);
+ env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
+ /////// Environment variable LD_LIBRARY_PATH
+
+ // for the child of task jvm, set hadoop.root.logger
+ env.put("HADOOP_ROOT_LOGGER", "DEBUG,CLA"); // TODO: Debug
+
+ // TODO: The following is useful for instance in streaming tasks. Should be
+ // set in ApplicationMaster's env by the RM.
+ String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
+ if (hadoopClientOpts == null) {
+ hadoopClientOpts = "";
+ } else {
+ hadoopClientOpts = hadoopClientOpts + " ";
+ }
+ // FIXME: don't think this is also needed given we already set java
+ // properties.
+ long logSize = TaskLog.getTaskLogLength(conf);
+ Vector<String> logProps = new Vector<String>(4);
+ setupLog4jProperties(logProps, logSize, containerLogDir);
+ Iterator<String> it = logProps.iterator();
+ StringBuffer buffer = new StringBuffer();
+ while (it.hasNext()) {
+ buffer.append(" " + it.next());
+ }
+ hadoopClientOpts = hadoopClientOpts + buffer.toString();
+
+ env.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
+
+ // add the env variables passed by the user
+ String mapredChildEnv = getChildEnv(conf, task.isMapTask());
+ if (mapredChildEnv != null && mapredChildEnv.length() > 0) {
+ String childEnvs[] = mapredChildEnv.split(",");
+ for (String cEnv : childEnvs) {
+ String[] parts = cEnv.split("="); // split on '='
+ String value = env.get(parts[0]);
+ if (value != null) {
+ // replace $env with the child's env constructed by tt's
+ // example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
+ value = parts[1].replace("$" + parts[0], value);
+ } else {
+ // this key is not configured by the tt for the child .. get it
+ // from the tt's env
+ // example PATH=$PATH:/tmp
+ value = System.getenv(parts[0]); // Get from NM?
+ if (value != null) {
+ // the env key is present in the tt's env
+ value = parts[1].replace("$" + parts[0], value);
+ } else {
+ // the env key is not present anywhere .. simply set it
+ // example X=$X:/tmp or X=/tmp
+ value = parts[1].replace("$" + parts[0], "");
+ }
+ }
+ env.put(parts[0], value);
+ }
+ }
+
+ // This should not be set here (if an OS check is required, it was moved to ContainerLaunch)
+ // env.put("JVM_PID", "`echo $$`");
+
+ env.put(Constants.STDOUT_LOGFILE_ENV,
+ getTaskLogFile(containerLogDir, TaskLog.LogName.STDOUT).toString());
+ env.put(Constants.STDERR_LOGFILE_ENV,
+ getTaskLogFile(containerLogDir, TaskLog.LogName.STDERR).toString());
+ }
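
The $VAR expansion above resolves each NAME=VALUE pair first against the env already built for the child, then against the parent process env, and finally against the empty string. A simplified stand-alone sketch of just the replace() logic (hypothetical names; for brevity it folds the two lookups into one map):

import java.util.HashMap;
import java.util.Map;

public class ChildEnvSketch {
  static Map<String, String> expand(String spec, Map<String, String> parentEnv) {
    Map<String, String> env = new HashMap<String, String>(parentEnv);
    for (String entry : spec.split(",")) {
      String[] parts = entry.split("=");   // e.g. "PATH=$PATH:/tmp"
      String existing = env.get(parts[0]);
      if (existing == null) {
        existing = "";                     // unset anywhere: "$X" -> ""
      }
      env.put(parts[0], parts[1].replace("$" + parts[0], existing));
    }
    return env;
  }

  public static void main(String[] args) {
    Map<String, String> parent = new HashMap<String, String>();
    parent.put("PATH", "/usr/bin");
    // prints {PATH=/usr/bin:/tmp, X=/opt} (map order may vary)
    System.out.println(expand("PATH=$PATH:/tmp,X=/opt", parent));
  }
}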
+
+ private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
+ if (isMapTask) {
+ return jobConf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, jobConf.get(
+ JobConf.MAPRED_TASK_JAVA_OPTS,
+ JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+ }
+ return jobConf
+ .get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, jobConf.get(
+ JobConf.MAPRED_TASK_JAVA_OPTS,
+ JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+ }
+
+ private static void setupLog4jProperties(Vector<String> vargs,
+ long logSize, String containerLogDir) {
+ vargs.add("-Dlog4j.configuration=container-log4j.properties");
+ vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + containerLogDir);
+ vargs.add("-Dhadoop.yarn.mr.totalLogFileSize=" + logSize);
+ }
+
+ public static List<String> getVMCommand(
+ InetSocketAddress taskAttemptListenerAddr, Task task, String javaHome,
+ String workDir, String logDir, String childTmpDir, ID jvmID) {
+
+ TaskAttemptID attemptID = task.getTaskID();
+ JobConf conf = task.conf;
+
+ Vector<String> vargs = new Vector<String>(8);
+
+ vargs.add("exec");
+ vargs.add(javaHome + "/bin/java");
+
+ // Add child (task) java-vm options.
+ //
+ // The following symbols if present in mapred.{map|reduce}.child.java.opts
+ // value are replaced:
+ // + @taskid@ is interpolated with value of TaskID.
+ // Other occurrences of @ will not be altered.
+ //
+ // Example with multiple arguments and substitutions, showing
+ // jvm GC logging, and start of a passwordless JVM JMX agent so can
+ // connect with jconsole and the likes to watch child memory, threads
+ // and get thread dumps.
+ //
+ // <property>
+ // <name>mapred.map.child.java.opts</name>
+ // <value>-Xmx512M -verbose:gc -Xloggc:/tmp/@taskid@.gc \
+ // -Dcom.sun.management.jmxremote.authenticate=false \
+ // -Dcom.sun.management.jmxremote.ssl=false \
+ // </value>
+ // </property>
+ //
+ // <property>
+ // <name>mapred.reduce.child.java.opts</name>
+ // <value>-Xmx1024M -verbose:gc -Xloggc:/tmp/@taskid@.gc \
+ // -Dcom.sun.management.jmxremote.authenticate=false \
+ // -Dcom.sun.management.jmxremote.ssl=false \
+ // </value>
+ // </property>
+ //
+ String javaOpts = getChildJavaOpts(conf, task.isMapTask());
+ javaOpts = javaOpts.replace("@taskid@", attemptID.toString());
+ String [] javaOptsSplit = javaOpts.split(" ");
+
+ // Add java.library.path; necessary for loading native libraries.
+ //
+ // 1. We add the 'cwd' of the task to its java.library.path to help
+ // users distribute native libraries via the DistributedCache.
+ // 2. The user can also specify extra paths to be added to the
+ // java.library.path via mapred.{map|reduce}.child.java.opts.
+ //
+ String libraryPath = workDir;
+ boolean hasUserLDPath = false;
+ for (int i = 0; i < javaOptsSplit.length; i++) {
+ if (javaOptsSplit[i].startsWith("-Djava.library.path=")) {
+ // TODO: Does the above take care of escaped space chars
+ javaOptsSplit[i] += SYSTEM_PATH_SEPARATOR + libraryPath;
+ hasUserLDPath = true;
+ break;
+ }
+ }
+ if (!hasUserLDPath) {
+ vargs.add("-Djava.library.path=" + libraryPath);
+ }
+ for (int i = 0; i < javaOptsSplit.length; i++) {
+ vargs.add(javaOptsSplit[i]);
+ }
+
+ if (childTmpDir != null) {
+ vargs.add("-Djava.io.tmpdir=" + childTmpDir);
+ }
+
+ // Setup the log4j prop
+ long logSize = TaskLog.getTaskLogLength(conf);
+ setupLog4jProperties(vargs, logSize, logDir);
+
+ if (conf.getProfileEnabled()) {
+ if (conf.getProfileTaskRange(task.isMapTask()
+ ).isIncluded(task.getPartition())) {
+ File prof = getTaskLogFile(logDir, TaskLog.LogName.PROFILE);
+ vargs.add(String.format(conf.getProfileParams(), prof.toString()));
+ }
+ }
+
+ // Add main class and its arguments
+ vargs.add(YarnChild.class.getName()); // main of Child
+ // pass TaskAttemptListener's address
+ vargs.add(taskAttemptListenerAddr.getAddress().getHostAddress());
+ vargs.add(Integer.toString(taskAttemptListenerAddr.getPort()));
+ vargs.add(attemptID.toString()); // pass task identifier
+
+ // Finally add the jvmID
+ vargs.add(String.valueOf(jvmID.getId()));
+ vargs.add("1>" + getTaskLogFile(logDir, TaskLog.LogName.STDERR));
+ vargs.add("2>" + getTaskLogFile(logDir, TaskLog.LogName.STDOUT));
+
+ // Final command
+ StringBuilder mergedCommand = new StringBuilder();
+ for (CharSequence str : vargs) {
+ mergedCommand.append(str).append(" ");
+ }
+ Vector<String> vargsFinal = new Vector<String>(1);
+ vargsFinal.add(mergedCommand.toString());
+ return vargsFinal;
+ }
+}
65 ...oop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapTaskAttemptImpl.java
@@ -0,0 +1,65 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+public class MapTaskAttemptImpl extends TaskAttemptImpl {
+
+ private final TaskSplitMetaInfo splitInfo;
+
+ public MapTaskAttemptImpl(TaskId taskId, int attempt,
+ EventHandler eventHandler, Path jobFile,
+ int partition, TaskSplitMetaInfo splitInfo, Configuration conf,
+ TaskAttemptListener taskAttemptListener,
+ OutputCommitter committer, Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
+ super(taskId, attempt, eventHandler,
+ taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
+ committer, jobToken, fsTokens, clock);
+ this.splitInfo = splitInfo;
+ }
+
+ @Override
+ public Task createRemoteTask() {
+ //job file name is set later in TaskAttempt; pass an empty string here
+ MapTask mapTask =
+ new MapTask("", TypeConverter.fromYarn(getID()), partition,
+ splitInfo.getSplitIndex(), 1); // YARN doesn't have the concept of slots per task, set it as 1.
+ mapTask.setUser(conf.get(MRJobConfig.USER_NAME));
+ mapTask.setConf(conf);
+ return mapTask;
+ }
+
+}
64 ...-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ReduceTaskAttemptImpl.java
@@ -0,0 +1,64 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.event.EventHandler;
+
+
+public class ReduceTaskAttemptImpl extends TaskAttemptImpl {
+
+ private final int numMapTasks;
+
+ public ReduceTaskAttemptImpl(TaskId id, int attempt,
+ EventHandler eventHandler, Path jobFile, int partition,
+ int numMapTasks, Configuration conf,
+ TaskAttemptListener taskAttemptListener, OutputCommitter committer,
+ Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
+ super(id, attempt, eventHandler, taskAttemptListener, jobFile, partition,
+ conf, new String[] {}, committer, jobToken, fsTokens, clock);
+ this.numMapTasks = numMapTasks;
+ }
+
+ @Override
+ public Task createRemoteTask() {
+ //job file name is set later in TaskAttempt; pass an empty string here
+ ReduceTask reduceTask =
+ new ReduceTask("", TypeConverter.fromYarn(getID()), partition,
+ numMapTasks, 1); // YARN doesn't have the concept of slots per task, set it as 1.
+ reduceTask.setUser(conf.get(MRJobConfig.USER_NAME));
+ reduceTask.setConf(conf);
+ return reduceTask;
+ }
+
+}
434 ...r-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
@@ -0,0 +1,434 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.mapred.SortedRanges.Range;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AMConstants;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.service.CompositeService;
+
+/**
+ * This class is responsible for talking to the task umbilical.
+ * It also converts all the old data structures
+ * to YARN data structures.
+ *
+ * This class HAS to be in this package to access package private
+ * methods/classes.
+ */
+public class TaskAttemptListenerImpl extends CompositeService
+ implements TaskUmbilicalProtocol, TaskAttemptListener {
+
+ private static final Log LOG = LogFactory.getLog(TaskAttemptListenerImpl.class);
+
+ private AppContext context;
+ private Server server;
+ private TaskHeartbeatHandler taskHeartbeatHandler;
+ private InetSocketAddress address;
+ private Map<WrappedJvmID, org.apache.hadoop.mapred.Task> jvmIDToAttemptMap =
+ Collections.synchronizedMap(new HashMap<WrappedJvmID,
+ org.apache.hadoop.mapred.Task>());
+ private JobTokenSecretManager jobTokenSecretManager = null;
+
+ public TaskAttemptListenerImpl(AppContext context,
+ JobTokenSecretManager jobTokenSecretManager) {
+ super(TaskAttemptListenerImpl.class.getName());
+ this.context = context;
+ this.jobTokenSecretManager = jobTokenSecretManager;
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ registerHeartbeatHandler();
+ super.init(conf);
+ }
+
+ @Override
+ public void start() {
+ startRpcServer();
+ super.start();
+ }
+
+ protected void registerHeartbeatHandler() {
+ taskHeartbeatHandler = new TaskHeartbeatHandler(context.getEventHandler(),
+ context.getClock());
+ addService(taskHeartbeatHandler);
+ }
+
+ protected void startRpcServer() {
+ Configuration conf = getConfig();
+ try {
+ server =
+ RPC.getServer(TaskUmbilicalProtocol.class, this, "0.0.0.0", 0,
+ conf.getInt(AMConstants.AM_TASK_LISTENER_THREADS,
+ AMConstants.DEFAULT_AM_TASK_LISTENER_THREADS),
+ false, conf, jobTokenSecretManager);
+ server.start();
+ InetSocketAddress listenerAddress = server.getListenerAddress();
+ this.address =
+ NetUtils.createSocketAddr(listenerAddress.getAddress()
+ .getLocalHost().getCanonicalHostName()
+ + ":" + listenerAddress.getPort());
+ } catch (IOException e) {
+ throw new YarnException(e);
+ }
+ }
+
+ @Override
+ public void stop() {
+ stopRpcServer();
+ super.stop();
+ }
+
+ protected void stopRpcServer() {
+ server.stop();
+ }
+
+ @Override
+ public InetSocketAddress getAddress() {
+ return address;
+ }
+
+ /**
+ * Child checking whether it can commit.
+ *
+ * <br/>
+ * Commit is a two-phased protocol. First the attempt informs the
+ * ApplicationMaster that it is
+ * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
+ * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
+ * a legacy from the centralized commit protocol handling by the JobTracker.
+ */
+ @Override
+ public boolean canCommit(TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("Commit go/no-go request from " + taskAttemptID.toString());
+ // An attempt is asking if it can commit its output. This can be decided
+ // only by the task which is managing the multiple attempts. So redirect the
+ // request there.
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ Job job = context.getJob(attemptID.getTaskId().getJobId());
+ Task task = job.getTask(attemptID.getTaskId());
+ return task.canCommit(attemptID);
+ }
+
+ /**
+ * TaskAttempt is reporting that it is in commit_pending and is waiting for
+ * the commit response.
+ *
+ * <br/>
+ * Commit is a two-phased protocol. First the attempt informs the
+ * ApplicationMaster that it is
+ * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
+ * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
+ * a legacy from the centralized commit protocol handling by the JobTracker.
+ */
+ @Override
+ public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus)
+ throws IOException, InterruptedException {
+ LOG.info("Commit-pending state update from " + taskAttemptID.toString());
+ // An attempt is reporting that it is in commit-pending state. The commit
+ // decision can be made only by the task which is managing the multiple
+ // attempts, so just record the state transition here.
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+ //Ignorable TaskStatus? - since a task will send a LastStatusUpdate
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID,
+ TaskAttemptEventType.TA_COMMIT_PENDING));
+ }
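
Taken together, canCommit() and commitPending() give the child-side commit loop described in the javadocs above: announce commit_pending once, then poll until the AM says go. A hedged sketch of that client half, using only the umbilical calls defined in this file (the poll interval is an assumption):

// assumes: import java.io.IOException; plus TaskAttemptID, TaskStatus and
// TaskUmbilicalProtocol from org.apache.hadoop.mapred (part of this patch)
static void commitWhenAllowed(TaskUmbilicalProtocol umbilical,
    TaskAttemptID id, TaskStatus status)
    throws IOException, InterruptedException {
  umbilical.commitPending(id, status); // phase 1: announce commit_pending
  while (!umbilical.canCommit(id)) {   // phase 2: poll until the AM says go
    Thread.sleep(1000);                // poll interval is an assumption
  }
  // safe to run the OutputCommitter's task commit from here
}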
+
+ @Override
+ public void done(TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("Done acknowledgement from " + taskAttemptID.toString());
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
+ }
+
+ @Override
+ public void fatalError(TaskAttemptID taskAttemptID, String msg)
+ throws IOException {
+ // This happens only in Child and in the Task.
+ LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
+ reportDiagnosticInfo(taskAttemptID, "Error: " + msg);
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+ }
+
+ @Override
+ public void fsError(TaskAttemptID taskAttemptID, String message)
+ throws IOException {
+ // This happens only in Child.
+ LOG.fatal("Task: " + taskAttemptID + " - failed due to FSError: "
+ + message);
+ reportDiagnosticInfo(taskAttemptID, "FSError: " + message);
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+ }
+
+ @Override
+ public void shuffleError(TaskAttemptID taskAttemptID, String message) throws IOException {
+ // TODO: This isn't really used in any MR code. Ask for removal.
+ }
+
+ @Override
+ public MapTaskCompletionEventsUpdate getMapCompletionEvents(
+ JobID jobIdentifier, int fromEventId, int maxEvents,
+ TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("MapCompletionEvents request from " + taskAttemptID.toString()
+ + ". fromEventID " + fromEventId + " maxEvents " + maxEvents);
+
+ // TODO: shouldReset is never used. See TT. Ask for Removal.
+ boolean shouldReset = false;
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[] events =
+ context.getJob(attemptID.getTaskId().getJobId()).getTaskAttemptCompletionEvents(
+ fromEventId, maxEvents);
+
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ // filter the events to return only map completion events in old format
+ List<TaskCompletionEvent> mapEvents = new ArrayList<TaskCompletionEvent>();
+ for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent event : events) {
+ if (TaskType.MAP.equals(event.getAttemptId().getTaskId().getTaskType())) {
+ mapEvents.add(TypeConverter.fromYarn(event));
+ }
+ }
+
+ return new MapTaskCompletionEventsUpdate(
+ mapEvents.toArray(new TaskCompletionEvent[0]), shouldReset);
+ }
+
+ @Override
+ public boolean ping(TaskAttemptID taskAttemptID) throws IOException {
+ LOG.info("Ping from " + taskAttemptID.toString());
+ taskHeartbeatHandler.receivedPing(TypeConverter.toYarn(taskAttemptID));
+ return true;
+ }
+
+ @Override
+ public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo)
+ throws IOException {
+ LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
+ + diagnosticInfo);
+
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ taskHeartbeatHandler.receivedPing(attemptID);
+
+ // This is mainly used for cases where we want to propagate exception traces
+ // of tasks that fail.
+
+ // This call exists as a hadoop mapreduce legacy wherein all changes in
+ // counters/progress/phase/output-size are reported through statusUpdate()
+ // call but not diagnosticInformation.
+ context.getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
+ }
+
+ @Override
+ public boolean statusUpdate(TaskAttemptID taskAttemptID,
+ TaskStatus taskStatus) throws IOException, InterruptedException {
+ LOG.info("Status update from " + taskAttemptID.toString());
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
+ TypeConverter.toYarn(taskAttemptID);
+ taskHeartbeatHandler.receivedPing(yarnAttemptID);
+ TaskAttemptStatus taskAttemptStatus =
+ new TaskAttemptStatus();
+ taskAttemptStatus.id = yarnAttemptID;
+ // Task sends the updated progress to the TT.
+ taskAttemptStatus.progress = taskStatus.getProgress();
+ LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : "
+ + taskStatus.getProgress());
+ // Task sends the diagnostic information to the TT
+ taskAttemptStatus.diagnosticInfo = taskStatus.getDiagnosticInfo();
+ // Task sends the updated state-string to the TT.
+ taskAttemptStatus.stateString = taskStatus.getStateString();
+ // Set the output-size when map-task finishes. Set by the task itself.
+ taskAttemptStatus.outputSize = taskStatus.getOutputSize();
+ // Task sends the updated phase to the TT.
+ taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
+ // Counters are updated by the task.
+ taskAttemptStatus.counters =
+ TypeConverter.toYarn(taskStatus.getCounters());
+
+ // Map Finish time set by the task (map only)
+ if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
+ taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
+ }
+
+ // Shuffle Finish time set by the task (reduce only).
+ if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
+ taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
+ }
+
+ // Sort finish time set by the task (reduce only).
+ if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
+ taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
+ }
+
+ // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
+ //taskAttemptStatus.taskState = TypeConverter.toYarn(taskStatus.getRunState());
+
+ //set the fetch failures
+ if (taskStatus.getFetchFailedMaps() != null
+ && taskStatus.getFetchFailedMaps().size() > 0) {
+ taskAttemptStatus.fetchFailedMaps =
+ new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
+ for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
+ taskAttemptStatus.fetchFailedMaps.add(
+ TypeConverter.toYarn(failedMapId));
+ }
+ }
+
+ // Task sends the information about the nextRecordRange to the TT
+
+// TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
+// taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
+// taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
+// taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
+// // This was used by TT to do counter updates only once every minute. So this
+// // isn't ever changed by the Task itself.
+// taskStatus.getIncludeCounters();
+
+ context.getEventHandler().handle(
+ new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id,
+ taskAttemptStatus));
+ return true;
+ }
+
+ @Override
+ public long getProtocolVersion(String arg0, long arg1) throws IOException {
+ return TaskUmbilicalProtocol.versionID;
+ }
+
+ @Override
+ public void reportNextRecordRange(TaskAttemptID taskAttemptID, Range range)
+ throws IOException {
+ // This is used when the feature of skipping records is enabled.
+
+ // This call exists as a hadoop mapreduce legacy wherein all changes in
+ // counters/progress/phase/output-size are reported through statusUpdate()
+ // call but not the next record range information.
+ throw new IOException("Not yet implemented.");
+ }
+
+ @Override
+ public JvmTask getTask(JvmContext context) throws IOException {
+
+ // A rough imitation of code from TaskTracker.
+
+ JVMId jvmId = context.jvmId;
+ LOG.info("JVM with ID : " + jvmId + " asked for a task");
+
+ // TODO: Is it an authorised container to get a task? Otherwise return null.
+
+ // TODO: Is the request for task-launch still valid?
+
+ // TODO: Child.java's firstTaskID isn't really firstTaskID. Ask for update
+ // to jobId and task-type.
+
+ WrappedJvmID wJvmID = new WrappedJvmID(jvmId.getJobId(), jvmId.isMap,
+ jvmId.getId());
+ org.apache.hadoop.mapred.Task task = jvmIDToAttemptMap.get(wJvmID);
+ if (task != null) { //there may be lag in the attempt getting added here
+ LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID());
+ JvmTask jvmTask = new JvmTask(task, false);
+
+ //remove the task as it is no longer needed and free up the memory
+ jvmIDToAttemptMap.remove(wJvmID);
+
+ return jvmTask;
+ }
+ return null;
+ }
+
+ @Override
+ public void register(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID,
+ org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {
+ //create the mapping so that it is easy to look up
+ //when it comes back to ask for Task.
+ jvmIDToAttemptMap.put(jvmID, task);
+ //register this attempt
+ taskHeartbeatHandler.register(attemptID);
+ }
+
+ @Override
+ public void unregister(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID,
+ WrappedJvmID jvmID) {
+ //remove the mapping if not already removed
+ jvmIDToAttemptMap.remove(jvmID);
+
+ //unregister this attempt
+ taskHeartbeatHandler.unregister(attemptID);
+ }
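
register()/getTask() amount to a consume-once handoff keyed by JVM id: the AM publishes the task, the child JVM claims it exactly once, and a null return just means the registration hasn't landed yet. A stand-alone sketch of that map discipline with simplified stand-in types (String/Integer here are hypothetical):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class JvmHandoffSketch {
  // the real map is WrappedJvmID -> org.apache.hadoop.mapred.Task
  private final Map<Integer, String> jvmToTask =
      Collections.synchronizedMap(new HashMap<Integer, String>());

  void register(int jvmId, String task) {
    jvmToTask.put(jvmId, task);            // AM side, at container launch
  }

  String getTask(int jvmId) {
    String task = jvmToTask.get(jvmId);
    if (task != null) {
      jvmToTask.remove(jvmId);             // consumed exactly once
    }
    return task;                           // null => registration lagging
  }

  public static void main(String[] args) {
    JvmHandoffSketch sketch = new JvmHandoffSketch();
    sketch.register(7, "attempt_..._m_000000_0");
    System.out.println(sketch.getTask(7)); // the task, exactly once
    System.out.println(sketch.getTask(7)); // null -- already handed off
  }
}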
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ return ProtocolSignature.getProtocolSignature(this,
+ protocol, clientVersion, clientMethodsHash);
+ }
+}
30 ...ce/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedJvmID.java
@@ -0,0 +1,30 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+/**
+ * A simple wrapper for increasing the visibility of {@link JVMId}.
+ */
+public class WrappedJvmID extends JVMId {
+
+ public WrappedJvmID(JobID jobID, boolean mapTask, int nextInt) {
+ super(jobID, mapTask, nextInt);
+ }
+
+}
15 .../hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedPeriodicStatsAccumulator.java
@@ -0,0 +1,15 @@
+package org.apache.hadoop.mapred;
+
+// Workaround for PeriodicStatsAccumulator being package access
+public class WrappedPeriodicStatsAccumulator {
+
+ private PeriodicStatsAccumulator real;
+
+ public WrappedPeriodicStatsAccumulator(PeriodicStatsAccumulator real) {
+ this.real = real;
+ }
+
+ public void extend(double newProgress, int newValue) {
+ real.extend(newProgress, newValue);
+ }
+}
52 ...lient/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedProgressSplitsBlock.java
@@ -0,0 +1,52 @@
+package org.apache.hadoop.mapred;
+
+// Workaround for ProgressSplitsBlock being package access
+public class WrappedProgressSplitsBlock extends ProgressSplitsBlock {
+
+ public static final int DEFAULT_NUMBER_PROGRESS_SPLITS = 12;
+
+ private WrappedPeriodicStatsAccumulator wrappedProgressWallclockTime;
+ private WrappedPeriodicStatsAccumulator wrappedProgressCPUTime;
+ private WrappedPeriodicStatsAccumulator wrappedProgressVirtualMemoryKbytes;
+ private WrappedPeriodicStatsAccumulator wrappedProgressPhysicalMemoryKbytes;
+
+ public WrappedProgressSplitsBlock(int numberSplits) {
+ super(numberSplits);
+ }
+
+ public int[][] burst() {
+ return super.burst();
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressWallclockTime() {
+ if (wrappedProgressWallclockTime == null) {
+ wrappedProgressWallclockTime = new WrappedPeriodicStatsAccumulator(
+ progressWallclockTime);
+ }
+ return wrappedProgressWallclockTime;
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressCPUTime() {
+ if (wrappedProgressCPUTime == null) {
+ wrappedProgressCPUTime = new WrappedPeriodicStatsAccumulator(
+ progressCPUTime);
+ }
+ return wrappedProgressCPUTime;
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressVirtualMemoryKbytes() {
+ if (wrappedProgressVirtualMemoryKbytes == null) {
+ wrappedProgressVirtualMemoryKbytes = new WrappedPeriodicStatsAccumulator(
+ progressVirtualMemoryKbytes);
+ }
+ return wrappedProgressVirtualMemoryKbytes;
+ }
+
+ public WrappedPeriodicStatsAccumulator getProgressPhysicalMemoryKbytes() {
+ if (wrappedProgressPhysicalMemoryKbytes == null) {
+ wrappedProgressPhysicalMemoryKbytes = new WrappedPeriodicStatsAccumulator(
+ progressPhysicalMemoryKbytes);
+ }
+ return wrappedProgressPhysicalMemoryKbytes;
+ }
+}
346 ...educe/hadoop-mr-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
@@ -0,0 +1,346 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSError;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;