[PUBDEV-4331][SW-311] Override default MapReduce type to H2O type (#1065)

The h2odriver can specify an H2O-specific application master.
The application master is still based on the standard MRAppMaster;
however, it overrides the default application type to 'H2O'.

This is an optional feature! To enable it, the launcher needs to override
the default MRAppMaster by specifying the following parameter:
```
-Dmapreduce.framework.name=h2o-yarn
```

For example:
```
hadoop jar h2odriver.jar -Dmapreduce.framework.name=h2o-yarn -nodes 1 -mapperXmx 4G -output -ichal_1
```
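
Once the job is up, the overridden application type should show up in the ResourceManager UI, and it can also be checked from the command line (assuming a standard YARN CLI):
```
yarn application -list -appTypes H2O
```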
mmalohlava committed May 18, 2017
1 parent a680b66 commit 896cad2
Showing 5 changed files with 249 additions and 3 deletions.
14 changes: 11 additions & 3 deletions h2o-hadoop/driverjar.gradle
```
@@ -13,10 +13,18 @@ sourceSets {
   main {
     java {
       if (project.hasProperty('notYarn')) {
-        srcDir '../h2o-mapreduce-generic/src/main/java'
+        srcDirs '../h2o-mapreduce-generic/src/main/java'
+      } else {
+        srcDirs '../h2o-mapreduce-generic/src/main/java',
+                '../h2o-yarn-generic/src/main/java'
       }
-      else {
-        srcDirs '../h2o-mapreduce-generic/src/main/java', '../h2o-yarn-generic/src/main/java'
       }
+    resources {
+      if (project.hasProperty('notYarn')) {
+        srcDirs '../h2o-mapreduce-generic/src/main/resources'
+      } else {
+        srcDirs '../h2o-mapreduce-generic/src/main/resources',
+                '../h2o-yarn-generic/src/main/resources'
       }
     }
+  }
```
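
As a side note, `notYarn` is an ordinary Gradle project property, so a build that should exclude the YARN-specific sources would presumably be invoked along these lines (hypothetical invocation; the exact task depends on the build setup):
```
gradle build -PnotYarn
```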
108 changes: 108 additions & 0 deletions h2o-yarn-generic/src/main/java/org/apache/hadoop/mapreduce/v2/app/H2OMRAppMaster.java
@@ -0,0 +1,108 @@
```
package org.apache.hadoop.mapreduce.v2.app;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ConverterUtils;

import java.io.IOException;

/**
 * H2O-specific MRAppMaster.
 */
public class H2OMRAppMaster extends MRAppMaster {
  private static final Log LOG = LogFactory.getLog(H2OMRAppMaster.class);

  public H2OMRAppMaster(ApplicationAttemptId applicationAttemptId,
                        ContainerId containerId,
                        String nmHost, int nmPort, int nmHttpPort, long appSubmitTime) {
    super(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort, appSubmitTime);
  }

  public H2OMRAppMaster(ApplicationAttemptId applicationAttemptId,
                        ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
                        Clock clock, long appSubmitTime) {
    super(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort, clock, appSubmitTime);
  }

  //
  // This is boot code copied from org.apache.hadoop.mapreduce.v2.app.MRAppMaster.
  // See: https://github.com/apache/hadoop/blame/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
  // for the latest changes.
  //
  public static void main(String[] args) {
    try {
      // --- Start ---
      Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
      String containerIdStr =
          System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name());
      String nodeHostString = System.getenv(ApplicationConstants.Environment.NM_HOST.name());
      String nodePortString = System.getenv(ApplicationConstants.Environment.NM_PORT.name());
      String nodeHttpPortString =
          System.getenv(ApplicationConstants.Environment.NM_HTTP_PORT.name());
      String appSubmitTimeStr =
          System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);

      validateInputParam(containerIdStr,
          ApplicationConstants.Environment.CONTAINER_ID.name());
      validateInputParam(nodeHostString, ApplicationConstants.Environment.NM_HOST.name());
      validateInputParam(nodePortString, ApplicationConstants.Environment.NM_PORT.name());
      validateInputParam(nodeHttpPortString,
          ApplicationConstants.Environment.NM_HTTP_PORT.name());
      validateInputParam(appSubmitTimeStr,
          ApplicationConstants.APP_SUBMIT_TIME_ENV);

      ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
      ApplicationAttemptId applicationAttemptId =
          containerId.getApplicationAttemptId();
      long appSubmitTime = Long.parseLong(appSubmitTimeStr);
      // --- End ---

      // We need to create H2OMRAppMaster instead of the default MR master
      MRAppMaster appMaster =
          new H2OMRAppMaster(applicationAttemptId, containerId, nodeHostString,
              Integer.parseInt(nodePortString),
              Integer.parseInt(nodeHttpPortString), appSubmitTime);
      // --- Start ---
      ShutdownHookManager.get().addShutdownHook(
          new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY);
      JobConf conf = new JobConf(new YarnConfiguration());
      conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));

      MRWebAppUtil.initialize(conf);
      String jobUserName = System
          .getenv(ApplicationConstants.Environment.USER.name());
      conf.set(MRJobConfig.USER_NAME, jobUserName);
      // Do not automatically close FileSystem objects so that in case of
      // SIGTERM I have a chance to write out the job history. I'll be closing
      // the objects myself.
      conf.setBoolean("fs.automatic.close", false);
      initAndStartAppMaster(appMaster, conf, jobUserName);
    } catch (Throwable t) {
      LOG.fatal("Error starting MRAppMaster", t);
      ExitUtil.terminate(1, t);
    }
    // --- End ---
  }

  private static void validateInputParam(String value, String param)
      throws IOException {
    if (value == null) {
      String msg = param + " is null";
      LOG.error(msg);
      throw new IOException(msg);
    }
  }
}
```
80 changes: 80 additions & 0 deletions h2o-yarn-generic/src/main/java/water/hadoop/mapred/H2OYARNRunner.java
@@ -0,0 +1,80 @@
```
package water.hadoop.mapred;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.ClientCache;
import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.app.H2OMRAppMaster;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

import java.io.IOException;
import java.util.List;
import java.util.Vector;

/**
 * An H2O-specific ClientProtocol implementation.
 *
 * In this case we use the standard YARNRunner, but just override the application type to H2O.
 */
public class H2OYARNRunner extends YARNRunner {

  private static final Log LOG = LogFactory.getLog(H2OYARNRunner.class);

  private static final String APPLICATION_MASTER_CLASS = H2OMRAppMaster.class.getName();

  public H2OYARNRunner(Configuration conf) {
    super(conf);
  }

  public H2OYARNRunner(Configuration conf,
                       ResourceMgrDelegate resMgrDelegate) {
    super(conf, resMgrDelegate);
  }

  public H2OYARNRunner(Configuration conf, ResourceMgrDelegate resMgrDelegate,
                       ClientCache clientCache) {
    super(conf, resMgrDelegate, clientCache);
  }

  @Override
  public ApplicationSubmissionContext createApplicationSubmissionContext(Configuration jobConf,
                                                                         String jobSubmitDir,
                                                                         Credentials ts)
      throws IOException {
    // Change the created app context
    LOG.info("Setting application type to H2O");
    ApplicationSubmissionContext appContext = super.createApplicationSubmissionContext(jobConf, jobSubmitDir, ts);
    appContext.setApplicationType("H2O");
    // Modify the MRAppMaster command to point to our master
    LOG.info("Setting MRAppMaster to " + H2OMRAppMaster.class.toString());
    ContainerLaunchContext origClc = appContext.getAMContainerSpec();
    ContainerLaunchContext newClc = ContainerLaunchContext.newInstance(
        origClc.getLocalResources(), origClc.getEnvironment(),
        replaceMRAppMaster(origClc.getCommands()),
        null, origClc.getTokens(), origClc.getApplicationACLs());
    LOG.info(newClc);
    appContext.setAMContainerSpec(newClc);
    // And return the modified context
    return appContext;
  }

  private List<String> replaceMRAppMaster(List<String> commands) {
    Vector<String> args = new Vector<String>(8);
    for (String cmd : commands) {
      if (cmd.contains(MRJobConfig.APPLICATION_MASTER_CLASS)) {
        cmd = cmd.replace(MRJobConfig.APPLICATION_MASTER_CLASS, APPLICATION_MASTER_CLASS);
      }
      args.add(cmd);
    }
    return args;
  }
}
```
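
To make the command rewrite concrete, below is a minimal, self-contained sketch (not part of the commit; the demo class and the sample command are simplified). It relies on the fact that Hadoop's `MRJobConfig.APPLICATION_MASTER_CLASS` is the fully qualified name of the stock `MRAppMaster`, so a plain string replacement inside the container launch command is enough to swap in the H2O master:
```
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ReplaceMRAppMasterDemo {
  // Hadoop defines MRJobConfig.APPLICATION_MASTER_CLASS as this class name.
  static final String DEFAULT_AM = "org.apache.hadoop.mapreduce.v2.app.MRAppMaster";
  static final String H2O_AM = "org.apache.hadoop.mapreduce.v2.app.H2OMRAppMaster";

  // Mirrors the private replaceMRAppMaster above: swap the AM class name
  // inside each launch command, leave every other argument untouched.
  static List<String> replaceMRAppMaster(List<String> commands) {
    List<String> args = new ArrayList<>(commands.size());
    for (String cmd : commands) {
      args.add(cmd.contains(DEFAULT_AM) ? cmd.replace(DEFAULT_AM, H2O_AM) : cmd);
    }
    return args;
  }

  public static void main(String[] args) {
    // A simplified example of the command YARN uses to launch the AM container.
    List<String> commands = Arrays.asList(
        "$JAVA_HOME/bin/java -Xmx1024m " + DEFAULT_AM + " 1>stdout 2>stderr");
    System.out.println(replaceMRAppMaster(commands));
  }
}
```
Everything else in the launch command (JVM options, log redirection) is preserved, which is why the override stays compatible with the standard MR application master contract.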

36 changes: 36 additions & 0 deletions h2o-yarn-generic/src/main/java/water/hadoop/mapred/H2OYarnClientProtocolProvider.java
@@ -0,0 +1,36 @@
```
package water.hadoop.mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;

import java.io.IOException;
import java.net.InetSocketAddress;

/**
 * H2O-specific YARN client provider.
 *
 * The provider can be selected by passing `-Dmapreduce.framework.name=h2o-yarn`.
 */
public class H2OYarnClientProtocolProvider extends ClientProtocolProvider {

  @Override
  public ClientProtocol create(Configuration conf) throws IOException {
    if ("h2o-yarn".equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
      return new H2OYARNRunner(conf);
    }
    return null;
  }

  @Override
  public ClientProtocol create(InetSocketAddress addr, Configuration conf)
      throws IOException {
    return create(conf);
  }

  @Override
  public void close(ClientProtocol clientProtocol) throws IOException {
    // nothing to do
  }
}
```
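
As a quick illustration of the provider contract (a hypothetical `ProviderSelectionDemo` class, not part of the commit; assumes a Hadoop client configuration on the classpath): `create` returns an `H2OYARNRunner` only when the framework name matches, and `null` otherwise, which tells Hadoop to fall through to the next registered provider.
```
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;

import water.hadoop.mapred.H2OYarnClientProtocolProvider;

public class ProviderSelectionDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The programmatic equivalent of -Dmapreduce.framework.name=h2o-yarn.
    conf.set(MRConfig.FRAMEWORK_NAME, "h2o-yarn");
    ClientProtocol runner = new H2OYarnClientProtocolProvider().create(conf);
    System.out.println(runner != null ? "H2OYARNRunner selected" : "provider declined");
  }
}
```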
14 changes: 14 additions & 0 deletions h2o-yarn-generic/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
@@ -0,0 +1,14 @@
```
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
water.hadoop.mapred.H2OYarnClientProtocolProvider
```
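
This resource file is what makes `-Dmapreduce.framework.name=h2o-yarn` effective: Hadoop's `Cluster` discovers `ClientProtocolProvider` implementations via Java's `ServiceLoader` and asks each one to create a client for the current configuration. A simplified sketch of that lookup (not the exact Hadoop code):
```
import java.util.ServiceLoader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;

public class ProviderLookupSketch {
  static ClientProtocol pickProvider(Configuration conf) throws Exception {
    // Every implementation listed in a META-INF/services file on the
    // classpath is returned by the ServiceLoader iterator.
    for (ClientProtocolProvider provider : ServiceLoader.load(ClientProtocolProvider.class)) {
      ClientProtocol client = provider.create(conf);
      if (client != null) {
        return client; // the first provider that accepts the configuration wins
      }
    }
    throw new IllegalStateException("no ClientProtocolProvider accepted the configuration");
  }
}
```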
