Retiring the Spring impala feature.
dulizhao committed Mar 5, 2015
1 parent e2f970b commit 9c43e71
Showing 5 changed files with 17 additions and 218 deletions.
2 changes: 1 addition & 1 deletion cli/README.md
@@ -1112,5 +1112,5 @@ Compute nodes are re-commissioned and powered-on or decommissioned and powered-o
 </tr>
 </table>
 ## 6. Hadoop Commands
-From CLI 0.6.0, we integrated the impala (https://github.com/SpringSource/impala) Hadoop HDFS, map/reduce, Pig, and Hive commands into the CLI. You need to use the "cluster target" command to set the HDFS or JobTracker URL before launching HDFS or map/reduce commands. More details can be found in the CLI help.
+From CLI 2.2.0, we retired impala (https://github.com/SpringSource/impala).

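For context, the workflow that the retired README text described looked roughly like this — the command names and output labels below come from the targetCluster code removed later in this commit, while the cluster name and addresses are illustrative:

    cluster target --name myCluster
    cluster target --info
    Cluster : myCluster
    HDFS url : hdfs://192.168.1.100:8020
    Job Tracker url : 192.168.1.100:8021

Once a cluster was targeted, the impala-backed hdfs and map/reduce commands ran against those URLs; with this commit, that entire flow goes away.
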
35 changes: 15 additions & 20 deletions cli/pom.xml
@@ -31,36 +31,31 @@
       <groupId>org.springframework</groupId>
       <artifactId>spring-test</artifactId>
     </dependency>
-    <!-- External modules -->
-    <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.hdfs</groupId>
-      <artifactId>serengeti-impala-plugin-hdfs</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.mapreduce</groupId>
-      <artifactId>serengeti-impala-plugin-mapreduce</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.pig</groupId>
-      <artifactId>serengeti-impala-plugin-pig</artifactId>
-    </dependency>
     <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.hive</groupId>
-      <artifactId>serengeti-impala-plugin-hive</artifactId>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
+      <version>4.3.4</version>
     </dependency>
+    <!-- External modules -->
     <dependency>
       <groupId>org.springframework.shell</groupId>
       <artifactId>spring-shell</artifactId>
       <version>${spring.shell.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-examples</artifactId>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+      <version>3.1</version>
     </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-core-asl</artifactId>
+      <version>1.7.1</version>
+    </dependency>
     <dependency>
-      <groupId>org.antlr</groupId>
-      <artifactId>antlr-runtime</artifactId>
-      <version>${antlr.version}</version>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-xc</artifactId>
+      <version>1.7.1</version>
     </dependency>
     <dependency>
       <groupId>org.springframework</groupId>
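A side note on the swaps above: httpclient 4.3.4, commons-httpclient 3.1, and the two Jackson 1.7.1 artifacts are presumably dependencies the CLI previously received transitively through the impala plugins, now declared directly so the CLI keeps building without them. One way to verify that no impala artifact remains on the module's classpath after this change (assuming cli builds as a module of the root pom):

    mvn -pl cli dependency:tree | grep -i impala

An empty result means the retirement is complete for this module.
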
161 changes: 1 addition & 160 deletions cli/src/main/java/com/vmware/bdd/cli/commands/ClusterCommands.java
@@ -29,9 +29,8 @@

 import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
+
 import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.data.hadoop.impala.hive.HiveCommands;
 import org.springframework.shell.core.CommandMarker;
 import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
 import org.springframework.shell.core.annotation.CliCommand;
@@ -80,15 +79,6 @@ public class ClusterCommands implements CommandMarker {
    @Autowired
    private AppManagerRestClient appManagerRestClient;
 
-   @Autowired
-   private Configuration hadoopConfiguration;
-
-   @Autowired
-   private HiveCommands hiveCommands;
-
-   private String hiveServerUrl;
-   private String targetClusterName;
-
    @CliAvailabilityIndicator({ "cluster help" })
    public boolean isCommandAvailable() {
       return true;
@@ -1107,155 +1097,6 @@ public void resetParam(
       }
    }
 
-   @CliCommand(value = "cluster target", help = "Set or query target cluster to run commands")
-   public void targetCluster(
-         @CliOption(key = { "name" }, mandatory = false, help = "The cluster name") final String name,
-         @CliOption(key = { "info" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "flag to show target information") final boolean info) {
-
-      ClusterRead cluster = null;
-      boolean noCluster = false;
-      try {
-         if (info) {
-            if (name != null) {
-               System.out
-                     .println("Warning: can't specify option --name and --info at the same time");
-               return;
-            }
-            String fsUrl = hadoopConfiguration.get("fs.default.name");
-            String jtUrl = hadoopConfiguration.get("mapred.job.tracker");
-            if ((fsUrl == null || fsUrl.length() == 0)
-                  && (jtUrl == null || jtUrl.length() == 0)) {
-               System.out
-                     .println("There is no targeted cluster. Run \"cluster target --name\" command first.");
-               return;
-            }
-            if (targetClusterName != null && targetClusterName.length() > 0) {
-               System.out.println("Cluster : " + targetClusterName);
-            }
-            if (fsUrl != null && fsUrl.length() > 0) {
-               System.out.println("HDFS url : " + fsUrl);
-            }
-            if (jtUrl != null && jtUrl.length() > 0) {
-               System.out.println("Job Tracker url : " + jtUrl);
-            }
-            if (hiveServerUrl != null && hiveServerUrl.length() > 0) {
-               System.out.println("Hive server info: " + hiveServerUrl);
-            }
-         } else {
-            if (name == null) {
-               ClusterRead[] clusters = restClient.getAll(false);
-               if (clusters != null && clusters.length > 0) {
-                  cluster = clusters[0];
-               } else {
-                  noCluster = true;
-               }
-            } else {
-               cluster = restClient.get(name, false);
-            }
-
-            if (cluster == null) {
-               if (noCluster) {
-                  System.out
-                        .println("There is no available cluster for targeting.");
-               } else {
-                  System.out.println("Failed to target cluster: The cluster "
-                        + name + " not found");
-               }
-               setFsURL("");
-               setJobTrackerURL("");
-               this.setHiveServerUrl("");
-            } else {
-               targetClusterName = cluster.getName();
-               boolean hasHDFS = false;
-               boolean hasHiveServer = false;
-               for (NodeGroupRead nodeGroup : cluster.getNodeGroups()) {
-                  for (String role : nodeGroup.getRoles()) {
-                     if ("hadoop_namenode".equals(role)) {
-                        List<NodeRead> nodes = nodeGroup.getInstances();
-                        if (nodes != null && nodes.size() > 0) {
-                           String nameNodeIP = nodes.get(0).fetchMgtIp();
-                           setNameNode(nameNodeIP);
-                           hasHDFS = true;
-                        } else {
-                           throw new CliRestException("no name node available");
-                        }
-                     }
-                     if ("hadoop_jobtracker".equals(role)) {
-                        List<NodeRead> nodes = nodeGroup.getInstances();
-                        if (nodes != null && nodes.size() > 0) {
-                           String jobTrackerIP = nodes.get(0).fetchMgtIp();
-                           setJobTracker(jobTrackerIP);
-                        } else {
-                           throw new CliRestException(
-                                 "no job tracker available");
-                        }
-                     }
-                     if ("hive_server".equals(role)) {
-                        List<NodeRead> nodes = nodeGroup.getInstances();
-                        if (nodes != null && nodes.size() > 0) {
-                           String hiveServerIP = nodes.get(0).fetchMgtIp();
-                           setHiveServerAddress(hiveServerIP);
-                           hasHiveServer = true;
-                        } else {
-                           throw new CliRestException(
-                                 "no hive server available");
-                        }
-                     }
-                  }
-               }
-               if (cluster.getExternalHDFS() != null
-                     && !cluster.getExternalHDFS().isEmpty()) {
-                  setFsURL(cluster.getExternalHDFS());
-                  hasHDFS = true;
-               }
-               if (!hasHDFS) {
-                  setFsURL("");
-               }
-               if (!hasHiveServer) {
-                  this.setHiveServerUrl("");
-               }
-            }
-         }
-      } catch (CliRestException e) {
-         CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, name,
-               Constants.OUTPUT_OP_TARGET, Constants.OUTPUT_OP_RESULT_FAIL,
-               e.getMessage());
-         setFsURL("");
-         setJobTrackerURL("");
-         this.setHiveServerUrl("");
-      }
-   }
-
-   private void setNameNode(String nameNodeAddress) {
-      String hdfsUrl = "hdfs://" + nameNodeAddress + ":8020";
-      setFsURL(hdfsUrl);
-   }
-
-   private void setFsURL(String fsURL) {
-      hadoopConfiguration.set("fs.default.name", fsURL);
-   }
-
-   private void setJobTracker(String jobTrackerAddress) {
-      String jobTrackerUrl = jobTrackerAddress + ":8021";
-      setJobTrackerURL(jobTrackerUrl);
-   }
-
-   private void setJobTrackerURL(String jobTrackerUrl) {
-      hadoopConfiguration.set("mapred.job.tracker", jobTrackerUrl);
-   }
-
-   private void setHiveServerAddress(String hiveServerAddress) {
-      try {
-         hiveServerUrl = hiveCommands.config(hiveServerAddress, 10000, null);
-      } catch (Exception e) {
-         throw new CliRestException("faild to set hive server address");
-      }
-   }
-
-   private void setHiveServerUrl(String hiveServerUrl) {
-      this.hiveServerUrl = hiveServerUrl;
-   }
-
    @CliCommand(value = "cluster config", help = "Config an existing cluster")
    public void configCluster(
          @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
@@ -15,6 +15,4 @@
       <bean class="com.vmware.bdd.cli.rest.RestErrorHandler" />
     </property>
   </bean>
-  <bean id="hadoopConfiguration" class="org.apache.hadoop.conf.Configuration"/>
-  <bean id="hiveCommands" class="org.springframework.data.hadoop.impala.hive.HiveCommands"/>
 </beans>
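The two bean definitions removed here are the XML half of the @Autowired fields deleted from ClusterCommands.java above; Spring injects such fields from beans of a matching type, and @Autowired dependencies are required by default, so dropping the beans without dropping the fields would have failed context startup. A minimal sketch of the retired pairing (the Java side is copied from the removed code):

    @Autowired
    private Configuration hadoopConfiguration;  // was satisfied by the hadoopConfiguration bean removed above

    @Autowired
    private HiveCommands hiveCommands;          // was satisfied by the hiveCommands bean removed above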
35 changes: 0 additions & 35 deletions pom.xml
@@ -10,7 +10,6 @@

   <modules>
     <module>server/software-mgmt-plugin-common</module>
-    <module>impala</module>
     <module>lib/vlsi</module>
     <module>server/common</module>
     <module>server/vc</module>
@@ -547,40 +546,6 @@
       <artifactId>commons-serengeti</artifactId>
       <version>2.2.0</version>
     </dependency>
-    <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.hdfs</groupId>
-      <artifactId>serengeti-impala-plugin-hdfs</artifactId>
-      <version>2.2.0</version>
-    </dependency>
-    <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.mapreduce</groupId>
-      <artifactId>serengeti-impala-plugin-mapreduce</artifactId>
-      <version>2.2.0</version>
-    </dependency>
-    <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.pig</groupId>
-      <artifactId>serengeti-impala-plugin-pig</artifactId>
-      <version>2.2.0</version>
-    </dependency>
-    <dependency>
-      <groupId>com.vmware.bdd.impala.plugin.hive</groupId>
-      <artifactId>serengeti-impala-plugin-hive</artifactId>
-      <version>2.2.0</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.antlr</groupId>
-          <artifactId>antlr-runtime</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.antlr</groupId>
-          <artifactId>antlr</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
   </dependencies>
 </dependencyManagement>

