Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PUBDEV-4331][SW-311] Override default MapReduce type to H2O type #1065

Merged
merged 1 commit into from May 18, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
14 changes: 11 additions & 3 deletions h2o-hadoop/driverjar.gradle
Expand Up @@ -13,10 +13,18 @@ sourceSets {
main {
    java {
        if (project.hasProperty('notYarn')) {
            // Non-YARN build: compile only the generic MapReduce sources.
            srcDirs '../h2o-mapreduce-generic/src/main/java'
        } else {
            // Default build: generic MapReduce sources plus the YARN-specific ones.
            srcDirs '../h2o-mapreduce-generic/src/main/java',
                    '../h2o-yarn-generic/src/main/java'
        }
    }
    resources {
        if (project.hasProperty('notYarn')) {
            srcDirs '../h2o-mapreduce-generic/src/main/resources'
        } else {
            // Includes the META-INF/services registration of the H2O
            // ClientProtocolProvider from the yarn-generic module.
            srcDirs '../h2o-mapreduce-generic/src/main/resources',
                    '../h2o-yarn-generic/src/main/resources'
        }
    }
}
Expand Down
@@ -0,0 +1,108 @@
package org.apache.hadoop.mapreduce.v2.app;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ConverterUtils;

import java.io.IOException;

/**
 * H2O-specific MRAppMaster: a drop-in replacement for the stock MapReduce
 * application master, launched instead of it so H2O can later hook into the
 * application-master lifecycle (e.g. to register the Flow UI port).
 */
public class H2OMRAppMaster extends MRAppMaster {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we need this class? It doesn't seem to do anything H2O specific at this point.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is just preparation for new changes - registration of Flow UI port in Mapred UI. Not necessary here, i can remove and include in next one.

private static final Log LOG = LogFactory.getLog(H2OMRAppMaster.class);

/**
 * Creates the H2O application master using the system clock.
 *
 * @param appAttemptId  attempt this AM serves
 * @param containerId   container the AM runs in
 * @param nmHost        NodeManager host name
 * @param nmPort        NodeManager RPC port
 * @param nmHttpPort    NodeManager HTTP port
 * @param appSubmitTime job submission timestamp (millis)
 */
public H2OMRAppMaster(ApplicationAttemptId appAttemptId, ContainerId containerId,
                      String nmHost, int nmPort, int nmHttpPort, long appSubmitTime) {
  super(appAttemptId, containerId, nmHost, nmPort, nmHttpPort, appSubmitTime);
}

/**
 * Creates the H2O application master with an explicit clock (useful for tests).
 *
 * @param appAttemptId  attempt this AM serves
 * @param containerId   container the AM runs in
 * @param nmHost        NodeManager host name
 * @param nmPort        NodeManager RPC port
 * @param nmHttpPort    NodeManager HTTP port
 * @param clock         clock implementation to use
 * @param appSubmitTime job submission timestamp (millis)
 */
public H2OMRAppMaster(ApplicationAttemptId appAttemptId, ContainerId containerId,
                      String nmHost, int nmPort, int nmHttpPort,
                      Clock clock, long appSubmitTime) {
  super(appAttemptId, containerId, nmHost, nmPort, nmHttpPort, clock, appSubmitTime);
}

//
// This is boot code from org.apache.hadoop.mapreduce.v2.app.MRAppMaster.
// See: https://github.com/apache/hadoop/blame/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
// for the latest changes.
//
// Entry point of the AM container. Identical to upstream MRAppMaster.main()
// except for the single instantiation of H2OMRAppMaster; the "Start"/"End"
// marker comments delimit the verbatim upstream copy.
public static void main(String[] args) {
try {
// --- Start --- (verbatim upstream boot code)
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
// The NodeManager hands container/host/port identity to the AM through
// environment variables.
String containerIdStr =
System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name());
String nodeHostString = System.getenv(ApplicationConstants.Environment.NM_HOST.name());
String nodePortString = System.getenv(ApplicationConstants.Environment.NM_PORT.name());
String nodeHttpPortString =
System.getenv(ApplicationConstants.Environment.NM_HTTP_PORT.name());
String appSubmitTimeStr =
System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);

// Fail fast with a descriptive IOException if any required variable is missing.
validateInputParam(containerIdStr,
ApplicationConstants.Environment.CONTAINER_ID.name());
validateInputParam(nodeHostString, ApplicationConstants.Environment.NM_HOST.name());
validateInputParam(nodePortString, ApplicationConstants.Environment.NM_PORT.name());
validateInputParam(nodeHttpPortString,
ApplicationConstants.Environment.NM_HTTP_PORT.name());
validateInputParam(appSubmitTimeStr,
ApplicationConstants.APP_SUBMIT_TIME_ENV);

ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
ApplicationAttemptId applicationAttemptId =
containerId.getApplicationAttemptId();
long appSubmitTime = Long.parseLong(appSubmitTimeStr);
// --- End ---

// We need to create H2OMRAppMaster instead of default MR master.
// This is the only divergence from the upstream boot sequence.
MRAppMaster appMaster =
new H2OMRAppMaster(applicationAttemptId, containerId, nodeHostString,
Integer.parseInt(nodePortString),
Integer.parseInt(nodeHttpPortString), appSubmitTime);
// --- Start --- (verbatim upstream boot code continues)
ShutdownHookManager.get().addShutdownHook(
new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY);
JobConf conf = new JobConf(new YarnConfiguration());
conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));

MRWebAppUtil.initialize(conf);
String jobUserName = System
.getenv(ApplicationConstants.Environment.USER.name());
conf.set(MRJobConfig.USER_NAME, jobUserName);
// Do not automatically close FileSystem objects so that in case of
// SIGTERM I have a chance to write out the job history. I'll be closing
// the objects myself.
conf.setBoolean("fs.automatic.close", false);
initAndStartAppMaster(appMaster, conf, jobUserName);
} catch (Throwable t) {
LOG.fatal("Error starting MRAppMaster", t);
ExitUtil.terminate(1, t);
}
// --- End ---
}

/**
 * Ensures a required launch parameter (read from the environment) is present.
 *
 * @param value the value read from the environment, possibly {@code null}
 * @param param the parameter name, used in the log and exception message
 * @throws IOException if {@code value} is {@code null}
 */
private static void validateInputParam(String value, String param)
    throws IOException {
  if (value != null) {
    return; // parameter present — nothing to check
  }
  final String msg = param + " is null";
  LOG.error(msg);
  throw new IOException(msg);
}
}
@@ -0,0 +1,80 @@
package water.hadoop.mapred;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.ClientCache;
import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.mapred.YARNRunner;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.app.H2OMRAppMaster;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Vector;

import water.util.CollectionUtils;

/**
 * An H2O-specific YARN protocol client.
 *
 * It reuses the standard YARNRunner but overrides the submitted application's
 * type to "H2O" and swaps in the H2O application master.
 */
public class H2OYARNRunner extends YARNRunner {

private static final Log LOG = LogFactory.getLog(H2OYARNRunner.class);

private static final String APPLICATION_MASTER_CLASS = H2OMRAppMaster.class.getName();

/** Creates the runner from a configuration, delegating to {@link YARNRunner}. */
public H2OYARNRunner(Configuration conf) {
  super(conf);
}

/** Creates the runner with an explicit ResourceManager delegate. */
public H2OYARNRunner(Configuration conf, ResourceMgrDelegate resMgrDelegate) {
  super(conf, resMgrDelegate);
}

/** Creates the runner with an explicit ResourceManager delegate and client cache. */
public H2OYARNRunner(Configuration conf, ResourceMgrDelegate resMgrDelegate,
                     ClientCache clientCache) {
  super(conf, resMgrDelegate, clientCache);
}

@Override
public ApplicationSubmissionContext createApplicationSubmissionContext(Configuration jobConf,
String jobSubmitDir,
Credentials ts)
throws IOException {
// Change created app context
LOG.info("Setting application type to H2O");
ApplicationSubmissionContext appContext = super.createApplicationSubmissionContext(jobConf, jobSubmitDir, ts);
appContext.setApplicationType("H2O");
// Modify MRAppMaster commands to point to our master
LOG.info("Setting MRAppMaster to " + H2OMRAppMaster.class.toString());
ContainerLaunchContext origClc = appContext.getAMContainerSpec();
ContainerLaunchContext newClc = ContainerLaunchContext.newInstance(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If we only need to set application type to H2O then we don't need a custom application master yet.

Is this meant as a groundwork for the future improvements?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes

origClc.getLocalResources(), origClc.getEnvironment(),
replaceMRAppMaster(origClc.getCommands()),
null, origClc.getTokens(), origClc.getApplicationACLs());
LOG.info(newClc);
appContext.setAMContainerSpec(newClc);
// And return modified context
return appContext;
}

/**
 * Returns a copy of the AM launch commands with every occurrence of the
 * default MRAppMaster class name replaced by the H2O application master
 * class name.
 *
 * @param commands original AM launch commands
 * @return a new list of the same size with the class name substituted
 */
private List<String> replaceMRAppMaster(List<String> commands) {
  // ArrayList presized to the input instead of a legacy Vector with an
  // arbitrary capacity of 8 — no synchronization is needed here.
  List<String> args = new ArrayList<>(commands.size());
  for (String cmd : commands) {
    if (cmd.contains(MRJobConfig.APPLICATION_MASTER_CLASS)) {
      cmd = cmd.replace(MRJobConfig.APPLICATION_MASTER_CLASS, APPLICATION_MASTER_CLASS);
    }
    args.add(cmd);
  }
  return args;
}
}

@@ -0,0 +1,36 @@
package water.hadoop.mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;

import java.io.IOException;
import java.net.InetSocketAddress;

/**
 * H2O-specific YARN client protocol provider.
 *
 * Activated by setting the MapReduce framework name, e.g.
 * `-Dmapreduce.framework.name=h2o-yarn`; otherwise this provider declines and
 * the default provider is used.
 */
public class H2OYarnClientProtocolProvider extends ClientProtocolProvider {

@Override
public ClientProtocol create(Configuration conf) throws IOException {
if ("h2o-yarn".equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What are the naming conventions here? I know that Apache Tez uses "yarn-h2o". Not sure if there are other examples like that. Perhaps we should use "-h2o" as a suffix instead.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Default is yarn. I did not find anything like that, but suffix makes sense, will change it.

return new H2OYARNRunner(conf);
}
return null;
}

/**
 * Address-based variant required by the provider contract; the address is
 * irrelevant here because selection is purely configuration-driven.
 */
@Override
public ClientProtocol create(InetSocketAddress addr, Configuration conf)
    throws IOException {
  // Delegate to the configuration-only factory.
  return create(conf);
}

/** Releases the protocol client. Intentionally a no-op: no resources are held. */
@Override
public void close(ClientProtocol clientProtocol) throws IOException {
  // Nothing to release for H2OYARNRunner instances.
}
}
@@ -0,0 +1,14 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
water.hadoop.mapred.H2OYarnClientProtocolProvider