Comparing changes

...compare: d4ea3e5f2a
  • 17 commits
  • 19 files changed
  • 0 commit comments
  • 4 contributors
Commits on Feb 11, 2011
@julienledem adding cause to exception b8ed062
Mac Yang Check in patched datanucleus libraries that fixed the MySQL nested substring issue 99806e7
Commits on Feb 12, 2011
@ashutoshc Merge remote branch 'gh/howl' into howl dedb110
Commits on Feb 14, 2011
@ashutoshc Remnants of package renaming. 51066b5
Commits on Feb 15, 2011
@julienledem removing file accidentally added 6855794
@khorgath Modifying hive launch script to allow for a HADOOP_OVERRIDE specification bfe5275
@ashutoshc Merge remote branch 'julien/howl' into howl 8018eb8
@ashutoshc Merge branch 'howl' of ssh://scm200.gold.ygrid.yahoo.com/grid/git/hive into howl b52bea7
Commits on Feb 16, 2011
Mac Yang first pass for the max partition check 9485947
Mac Yang remove datanucleus-core-2.0.5.jar 7d616c0
Mac Yang Added datanucleus.LICENSE since we now include datanucleus jars in the lib directory. Also replaced datanucleus-core-2.0.5.jar with the "hive standard" datanucleus-core-2.0.3.jar 1da4675
Mac Yang revert back to datanucleus-core-2.0.5.jar e7477e6
Mac Yang revert back to datanucleus-core-2.0.5.jar 67778e8
Commits on Feb 17, 2011
@khorgath Fixing bug with null HADOOP_OVERRIDE 2566c93
@khorgath Merge branch 'howl' of ssh://scm200.gold.ygrid.yahoo.com/grid/git/hive into howl 18c52d9
@khorgath Changes to be able to read and write to hive RCFile tables without howl instrumentation 6548fc6
@khorgath Merge branch 'howl' of ssh://scm200.gold.ygrid.yahoo.com/grid/git/hive into howl d4ea3e5
6 bin/hive
@@ -150,6 +150,12 @@ if [ "$HADOOP_HOME" == "" ]; then
fi
HADOOP=$HADOOP_HOME/bin/hadoop
+if [ "x$HADOOP_OVERRIDE" != "x" ]; then
+ if [ -f ${HADOOP_OVERRIDE} ]; then
+ HADOOP=$HADOOP_OVERRIDE
+ fi
+fi
+
if [ ! -f ${HADOOP} ]; then
echo "Cannot find hadoop installation: \$HADOOP_HOME must be set or hadoop must be in the path";
exit 4;
1  howl/src/java/org/apache/howl/common/ErrorType.java
@@ -24,6 +24,7 @@
/* Howl Input Format related errors 1000 - 1999 */
ERROR_DB_INIT (1000, "Error initializing database session"),
+ ERROR_EXCEED_MAXPART (1001, "Query result exceeded maximum number of partitions allowed"),
/* Howl Output Format related errors 2000 - 2999 */
5 howl/src/java/org/apache/howl/common/HowlConstants.java
@@ -8,6 +8,11 @@
/** The key for the output storage driver class name */
public static final String HOWL_OSD_CLASS = "howl.osd";
+ public static final String HIVE_RCFILE_IF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileInputFormat";
+ public static final String HIVE_RCFILE_OF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileOutputFormat";
+ public static final String HOWL_RCFILE_ISD_CLASS = "org.apache.howl.rcfile.RCFileInputDriver";
+ public static final String HOWL_RCFILE_OSD_CLASS = "org.apache.howl.rcfile.RCFileOutputDriver";
+
private HowlConstants() { // restrict instantiation
}
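These four constants back the fallback behaviour added further down in this changeset: when a table carries no howl.isd / howl.osd properties, InitializeInput now inspects the storage descriptor and, if the table is a plain Hive RCFile table, defaults to the Howl RCFile drivers. A condensed sketch of that decision, using only names that appear in this diff; the wrapper class and method name are illustrative, the real logic is in the InitializeInput.java changes below.

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.howl.common.HowlConstants;

    class RcFileFallbackSketch {
      // Sketch only: mirrors the input-driver resolution added to
      // InitializeInput.extractPartInfo / extractStorerInfo in this changeset.
      static String resolveInputDriver(StorageDescriptor sd, Map<String, String> params)
          throws IOException {
        if (params.containsKey(HowlConstants.HOWL_ISD_CLASS)) {
          // Table was written through Howl: use the recorded driver.
          return params.get(HowlConstants.HOWL_ISD_CLASS);
        }
        if (HowlConstants.HIVE_RCFILE_IF_CLASS.equals(sd.getInputFormat())) {
          // Plain Hive RCFile table: default to the Howl RCFile driver.
          return HowlConstants.HOWL_RCFILE_ISD_CLASS;
        }
        throw new IOException("No input storage driver classname found, cannot read partition");
      }
    }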
2  howl/src/java/org/apache/howl/mapreduce/HowlOutputCommitter.java
@@ -184,7 +184,7 @@ public void cleanupJob(JobContext context) throws IOException {
try {
client = HowlOutputFormat.createHiveClient(tableInfo.getServerUri(), conf);
- StorerInfo storer = InitializeInput.extractStorerInfo(table.getParameters());
+ StorerInfo storer = InitializeInput.extractStorerInfo(table.getSd(),table.getParameters());
Partition partition = new Partition();
partition.setDbName(tableInfo.getDatabaseName());
4 howl/src/java/org/apache/howl/mapreduce/HowlOutputFormat.java
@@ -122,7 +122,7 @@ public static void setOutput(Job job, HowlTableInfo outputInfo) throws IOExcepti
StorageDescriptor tblSD = table.getSd();
HowlSchema tableSchema = HowlUtil.extractSchemaFromStorageDescriptor(tblSD);
- StorerInfo storerInfo = InitializeInput.extractStorerInfo(table.getParameters());
+ StorerInfo storerInfo = InitializeInput.extractStorerInfo(tblSD,table.getParameters());
List<String> partitionCols = new ArrayList<String>();
for(FieldSchema schema : table.getPartitionKeys()) {
@@ -451,9 +451,11 @@ static HiveMetaStoreClient createHiveClient(String url, Configuration conf) thro
if( url != null ) {
//User specified a thrift url
+/*
hiveConf.setBoolean(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, true);
hiveConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, conf.get(HowlConstants.HOWL_METASTORE_PRINCIPAL));
+*/
hiveConf.set("hive.metastore.local", "false");
hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, url);
if(conf.get(HOWL_KEY_TOKEN_SIGNATURE) != null) {
47 howl/src/java/org/apache/howl/mapreduce/InitializeInput.java
@@ -26,15 +26,17 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.howl.common.HowlConstants;
+import org.apache.howl.common.ErrorType;
+import org.apache.howl.common.HowlException;
+import org.apache.howl.common.HowlUtil;
+import org.apache.howl.data.schema.HowlSchema;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.mapreduce.Job;
-import org.apache.howl.common.HowlConstants;
-import org.apache.howl.common.HowlUtil;
-import org.apache.howl.data.schema.HowlSchema;
/**
* The Class which handles querying the metadata server using the MetaStoreClient. The list of
@@ -46,13 +48,15 @@
/** The prefix for keys used for storage driver arguments */
private static final String HOWL_KEY_PREFIX = "howl.";
+ private static final HiveConf hiveConf = new HiveConf(HowlInputFormat.class);
private static HiveMetaStoreClient createHiveMetaClient(Configuration conf, HowlTableInfo inputInfo) throws Exception {
- HiveConf hiveConf = new HiveConf(HowlInputFormat.class);
if (inputInfo.getServerUri() != null){
+/*
hiveConf.setBoolean(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, true);
hiveConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname,
inputInfo.getServerKerberosPrincipal());
+*/
hiveConf.set("hive.metastore.local", "false");
hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, inputInfo.getServerUri());
}
@@ -87,6 +91,18 @@ public static void setInput(Job job, HowlTableInfo inputInfo) throws Exception {
inputInfo.getDatabaseName(), inputInfo.getTableName(),
inputInfo.getFilter(), (short) -1);
+ // Default to 100,000 partitions if hive.metastore.maxpartition is not defined
+ int maxPart = hiveConf.getInt("hive.metastore.maxpartition", 100000);
+ /*
+ Properties props = hiveConf.getAllProperties();
+ for (Object k : props.keySet()) {
+ System.out.println("hiveConf: " + k + "=" + props.getProperty((String)k));
+ }
+ */
+ if (parts != null && parts.size() > maxPart) {
+ throw new HowlException(ErrorType.ERROR_EXCEED_MAXPART, "total number of partitions is " + parts.size());
+ }
+
// populate partition info
for (Partition ptn : parts){
PartInfo partInfo = extractPartInfo(ptn.getSd(),ptn.getParameters());
@@ -143,7 +159,12 @@ private static PartInfo extractPartInfo(StorageDescriptor sd, Map<String,String>
if (parameters.containsKey(HowlConstants.HOWL_ISD_CLASS)){
inputStorageDriverClass = parameters.get(HowlConstants.HOWL_ISD_CLASS);
}else{
- throw new IOException("No input storage driver classname found, cannot read partition");
+ // attempt to default to RCFile if the storage descriptor says it's an RCFile
+ if ((sd.getInputFormat() != null) && (sd.getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS))){
+ inputStorageDriverClass = HowlConstants.HOWL_RCFILE_ISD_CLASS;
+ }else{
+ throw new IOException("No input storage driver classname found, cannot read partition");
+ }
}
for (String key : parameters.keySet()){
if (key.startsWith(HOWL_KEY_PREFIX)){
@@ -155,19 +176,29 @@ private static PartInfo extractPartInfo(StorageDescriptor sd, Map<String,String>
- static StorerInfo extractStorerInfo(Map<String, String> properties) throws IOException {
+ static StorerInfo extractStorerInfo(StorageDescriptor sd, Map<String, String> properties) throws IOException {
String inputSDClass, outputSDClass;
if (properties.containsKey(HowlConstants.HOWL_ISD_CLASS)){
inputSDClass = properties.get(HowlConstants.HOWL_ISD_CLASS);
}else{
- throw new IOException("No input storage driver classname found for table, cannot write partition");
+ // attempt to default to RCFile if the storage descriptor says it's an RCFile
+ if ((sd.getInputFormat() != null) && (sd.getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS))){
+ inputSDClass = HowlConstants.HOWL_RCFILE_ISD_CLASS;
+ }else{
+ throw new IOException("No input storage driver classname found for table, cannot write partition");
+ }
}
if (properties.containsKey(HowlConstants.HOWL_OSD_CLASS)){
outputSDClass = properties.get(HowlConstants.HOWL_OSD_CLASS);
}else{
- throw new IOException("No output storage driver classname found for table, cannot write partition");
+ // attempt to default to RCFile if the storage descriptor says it's an RCFile
+ if ((sd.getOutputFormat() != null) && (sd.getOutputFormat().equals(HowlConstants.HIVE_RCFILE_OF_CLASS))){
+ outputSDClass = HowlConstants.HOWL_RCFILE_OSD_CLASS;
+ }else{
+ throw new IOException("No output storage driver classname found for table, cannot write partition");
+ }
}
Properties howlProperties = new Properties();
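The other notable change in this file is the partition-count guard in setInput: instead of planning a job over an arbitrarily large partition list, it now throws ERROR_EXCEED_MAXPART when the metastore returns more partitions than hive.metastore.maxpartition allows (default 100,000). A minimal sketch of the guard in isolation; the wrapper class name and how the HiveConf value is supplied (hive-site.xml versus programmatic configuration) are assumptions for illustration, not part of the patch.

    import java.util.List;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.howl.common.ErrorType;
    import org.apache.howl.common.HowlException;

    class MaxPartitionGuardSketch {
      // Sketch only: the same check InitializeInput.setInput now performs
      // after listing partitions from the metastore.
      static void checkPartitionCount(HiveConf hiveConf, List<Partition> parts)
          throws HowlException {
        // Default to 100,000 partitions if hive.metastore.maxpartition is not defined.
        int maxPart = hiveConf.getInt("hive.metastore.maxpartition", 100000);
        if (parts != null && parts.size() > maxPart) {
          throw new HowlException(ErrorType.ERROR_EXCEED_MAXPART,
              "total number of partitions is " + parts.size());
        }
      }
    }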
2  howl/src/java/org/apache/howl/pig/HowlStorer.java
@@ -486,7 +486,7 @@ public void setStoreLocation(String location, Job job) throws IOException {
} catch(HowlException he) {
// pass the message to the user - essentially something about the table
// information passed to HowlOutputFormat was not right
- throw new PigException(he.getMessage(), PigHowlUtil.PIG_EXCEPTION_CODE);
+ throw new PigException(he.getMessage(), PigHowlUtil.PIG_EXCEPTION_CODE, he);
}
howlTblSchema = HowlOutputFormat.getTableSchema(job);
try{
16 howl/src/java/org/apache/howl/pig/PigHowlUtil.java
@@ -26,21 +26,21 @@
import java.util.Properties;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.mapreduce.Job;
import org.apache.howl.common.HowlConstants;
import org.apache.howl.data.HowlArrayBag;
import org.apache.howl.data.HowlRecord;
import org.apache.howl.data.Pair;
import org.apache.howl.data.schema.HowlFieldSchema;
-import org.apache.howl.data.schema.HowlSchema;
import org.apache.howl.data.schema.HowlFieldSchema.Type;
+import org.apache.howl.data.schema.HowlSchema;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.pig.LoadPushDown.RequiredField;
import org.apache.pig.PigException;
import org.apache.pig.ResourceSchema;
-import org.apache.pig.LoadPushDown.RequiredField;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.DataType;
@@ -97,8 +97,10 @@ private static HiveMetaStoreClient createHiveMetaClient(String serverUri,
HiveConf hiveConf = new HiveConf(clazz);
if (serverUri != null){
+/*
hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
hiveConf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, serverKerberosPrincipal);
+*/
hiveConf.set("hive.metastore.local", "false");
hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
}
180 howl/src/test/org/apache/howl/mapreduce/TestHowlHiveCompatibility.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.howl.mapreduce;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.service.HiveClient;
+import org.apache.howl.MiniCluster;
+import org.apache.howl.common.HowlConstants;
+import org.apache.howl.pig.HowlLoader;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigException;
+import org.apache.pig.PigServer;
+import org.apache.pig.data.Tuple;
+import org.apache.pig.impl.logicalLayer.FrontendException;
+import org.apache.pig.impl.util.LogUtils;
+import org.apache.pig.impl.util.UDFContext;
+
+
+public class TestHowlHiveCompatibility extends TestCase {
+
+ MiniCluster cluster = MiniCluster.buildCluster();
+ private Driver driver;
+ Properties props;
+
+ private HiveMetaStoreClient client;
+
+ String fileName = "/tmp/input.data";
+ String fullFileName;
+
+ @Override
+ protected void setUp() throws Exception {
+
+ HiveConf hiveConf = new HiveConf(this.getClass());
+ hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
+ hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+ hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+ driver = new Driver(hiveConf);
+ client = new HiveMetaStoreClient(hiveConf);
+ SessionState.start(new CliSessionState(hiveConf));
+ props = new Properties();
+ props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
+ fullFileName = cluster.getProperties().getProperty("fs.default.name") + fileName;
+
+ MiniCluster.deleteFile(cluster, fileName);
+ int LOOP_SIZE = 11;
+ String[] input = new String[LOOP_SIZE];
+ for(int i = 0; i < LOOP_SIZE; i++) {
+ input[i] = i + "\t1";
+ }
+ MiniCluster.createInputFile(cluster, fileName, input);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ MiniCluster.deleteFile(cluster, fileName);
+ }
+
+ public void testUnpartedReadWrite() throws Exception{
+
+ driver.run("drop table junit_unparted_noisd");
+ String createTable = "create table junit_unparted_noisd(a int) stored as RCFILE";
+ int retCode = driver.run(createTable).getResponseCode();
+ if(retCode != 0) {
+ throw new IOException("Failed to create table.");
+ }
+
+ // assert that the table created has no howl instrumentation, and that we're still able to read it.
+ Table table = client.getTable("default", "junit_unparted_noisd");
+ assertFalse(table.getParameters().containsKey(HowlConstants.HOWL_ISD_CLASS));
+ assertTrue(table.getSd().getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS));
+
+ PigServer server = new PigServer(ExecType.LOCAL, props);
+ UDFContext.getUDFContext().setClientSystemProps();
+ server.registerQuery("A = load '"+fullFileName+"' as (a:int);");
+ server.registerQuery("store A into 'default.junit_unparted_noisd' using org.apache.howl.pig.HowlStorer();");
+ server.registerQuery("B = load 'default.junit_unparted_noisd' using "+HowlLoader.class.getName()+"();");
+ Iterator<Tuple> itr= server.openIterator("B");
+
+ int i = 0;
+
+ while(itr.hasNext()){
+ Tuple t = itr.next();
+ assertEquals(1, t.size());
+ assertEquals(t.get(0), i);
+ i++;
+ }
+
+ assertFalse(itr.hasNext());
+ assertEquals(11, i);
+
+ // assert that the table created still has no howl instrumentation
+ Table table2 = client.getTable("default", "junit_unparted_noisd");
+ assertFalse(table2.getParameters().containsKey(HowlConstants.HOWL_ISD_CLASS));
+ assertTrue(table2.getSd().getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS));
+
+ driver.run("drop table junit_unparted_noisd");
+ }
+
+ public void testPartedRead() throws Exception{
+
+ driver.run("drop table junit_parted_noisd");
+ String createTable = "create table junit_parted_noisd(a int) partitioned by (b string) stored as RCFILE";
+ int retCode = driver.run(createTable).getResponseCode();
+ if(retCode != 0) {
+ throw new IOException("Failed to create table.");
+ }
+
+ // assert that the table created has no howl instrumentation, and that we're still able to read it.
+ Table table = client.getTable("default", "junit_parted_noisd");
+
+ assertFalse(table.getParameters().containsKey(HowlConstants.HOWL_ISD_CLASS));
+ assertTrue(table.getSd().getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS));
+
+ PigServer server = new PigServer(ExecType.LOCAL, props);
+ UDFContext.getUDFContext().setClientSystemProps();
+ server.registerQuery("A = load '"+fullFileName+"' as (a:int);");
+ server.registerQuery("store A into 'default.junit_parted_noisd' using org.apache.howl.pig.HowlStorer('b=42');");
+ server.registerQuery("B = load 'default.junit_parted_noisd' using "+HowlLoader.class.getName()+"();");
+ Iterator<Tuple> itr= server.openIterator("B");
+
+ int i = 0;
+
+ while(itr.hasNext()){
+ Tuple t = itr.next();
+ assertEquals(2, t.size());
+ assertEquals(t.get(0), i);
+ assertEquals(t.get(1), "42");
+ i++;
+ }
+
+ assertFalse(itr.hasNext());
+ assertEquals(11, i);
+
+ // assert that the table created still has no howl instrumentation
+ Table table2 = client.getTable("default", "junit_parted_noisd");
+ assertFalse(table2.getParameters().containsKey(HowlConstants.HOWL_ISD_CLASS));
+ assertTrue(table2.getSd().getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS));
+
+ // assert that there is one partition present, and it had howl instrumentation inserted when it was created.
+ Partition ptn = client.getPartition("default", "junit_parted_noisd", Arrays.asList("42"));
+
+ assertNotNull(ptn);
+ assertTrue(ptn.getParameters().containsKey(HowlConstants.HOWL_ISD_CLASS));
+ assertTrue(ptn.getSd().getInputFormat().equals(HowlConstants.HIVE_RCFILE_IF_CLASS));
+ driver.run("drop table junit_unparted_noisd");
+ }
+
+
+}
2  howl/src/test/org/apache/howl/mapreduce/TestHowlOutputFormat.java
@@ -159,7 +159,7 @@ public void publishTest(Job job) throws Exception {
Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
assertNotNull(part);
- StorerInfo storer = InitializeInput.extractStorerInfo(part.getParameters());
+ StorerInfo storer = InitializeInput.extractStorerInfo(part.getSd(),part.getParameters());
assertEquals(storer.getInputSDClass(), "testInputClass");
assertEquals(storer.getProperties().get("howl.testarg"), "testArgValue");
assertTrue(part.getSd().getLocation().indexOf("p1") != -1);
26 howl/src/test/org/apache/howl/pig/TestHowlLoader.java
@@ -70,8 +70,8 @@ private void createTable(String tablename, String schema, String partitionedBy)
if ((partitionedBy != null)&&(!partitionedBy.trim().isEmpty())){
createTable = createTable + "partitioned by ("+partitionedBy+") ";
}
- createTable = createTable + "stored as RCFILE tblproperties('howl.isd'='org.apache.hadoop.hive.howl.rcfile.RCFileInputDriver'," +
- "'howl.osd'='org.apache.hadoop.hive.howl.rcfile.RCFileOutputDriver') ";
+ createTable = createTable + "stored as RCFILE tblproperties('howl.isd'='org.apache.howl.rcfile.RCFileInputDriver'," +
+ "'howl.osd'='org.apache.howl.rcfile.RCFileOutputDriver') ";
int retCode = driver.run(createTable).getResponseCode();
if(retCode != 0) {
throw new IOException("Failed to create table. ["+createTable+"], return code from hive driver : ["+retCode+"]");
@@ -140,17 +140,17 @@ protected void guardedSetUpBeforeClass() throws Exception {
server.setBatchOn();
server.registerQuery("A = load '"+fullFileNameBasic+"' as (a:int, b:chararray);");
- server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer();");
+ server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.howl.pig.HowlStorer();");
server.registerQuery("B = foreach A generate a,b;");
server.registerQuery("B2 = filter B by a < 2;");
- server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer('bkt=0');");
+ server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlStorer('bkt=0');");
server.registerQuery("C = foreach A generate a,b;");
server.registerQuery("C2 = filter C by a >= 2;");
- server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer('bkt=1');");
+ server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlStorer('bkt=1');");
server.registerQuery("D = load '"+fullFileNameComplex+"' as (name:chararray, studentid:int, contact:tuple(phno:chararray,email:chararray), currently_registered_courses:bag{innertup:tuple(course:chararray)}, current_grades:map[ ] , phnos :bag{innertup:tuple(phno:chararray,type:chararray)});");
- server.registerQuery("store D into '"+COMPLEX_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer();");
+ server.registerQuery("store D into '"+COMPLEX_TABLE+"' using org.apache.howl.pig.HowlStorer();");
server.executeBatch();
}
@@ -185,7 +185,7 @@ public void testSchemaLoadBasic() throws IOException{
PigServer server = new PigServer(ExecType.LOCAL, props);
// test that schema was loaded correctly
- server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlLoader();");
+ server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.howl.pig.HowlLoader();");
Schema dumpedXSchema = server.dumpSchema("X");
List<FieldSchema> Xfields = dumpedXSchema.getFields();
assertEquals(2,Xfields.size());
@@ -199,7 +199,7 @@ public void testSchemaLoadBasic() throws IOException{
public void testReadDataBasic() throws IOException {
PigServer server = new PigServer(ExecType.LOCAL, props);
- server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlLoader();");
+ server.registerQuery("X = load '"+BASIC_TABLE+"' using org.apache.howl.pig.HowlLoader();");
Iterator<Tuple> XIter = server.openIterator("X");
int numTuplesRead = 0;
while( XIter.hasNext() ){
@@ -219,7 +219,7 @@ public void testSchemaLoadComplex() throws IOException{
PigServer server = new PigServer(ExecType.LOCAL, props);
// test that schema was loaded correctly
- server.registerQuery("K = load '"+COMPLEX_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlLoader();");
+ server.registerQuery("K = load '"+COMPLEX_TABLE+"' using org.apache.howl.pig.HowlLoader();");
Schema dumpedKSchema = server.dumpSchema("K");
List<FieldSchema> Kfields = dumpedKSchema.getFields();
assertEquals(6,Kfields.size());
@@ -279,7 +279,7 @@ public void testReadPartitionedBasic() throws IOException {
driver.getResults(valuesReadFromHiveDriver);
assertEquals(basicInputData.size(),valuesReadFromHiveDriver.size());
- server.registerQuery("W = load '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlLoader();");
+ server.registerQuery("W = load '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlLoader();");
Schema dumpedWSchema = server.dumpSchema("W");
List<FieldSchema> Wfields = dumpedWSchema.getFields();
assertEquals(3,Wfields.size());
@@ -307,7 +307,7 @@ public void testReadPartitionedBasic() throws IOException {
}
assertEquals(valuesReadFromHiveDriver.size(),valuesRead.size());
- server.registerQuery("P1 = load '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlLoader();");
+ server.registerQuery("P1 = load '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlLoader();");
server.registerQuery("P1filter = filter P1 by bkt == '0';");
Iterator<Tuple> P1Iter = server.openIterator("P1filter");
int count1 = 0;
@@ -320,7 +320,7 @@ public void testReadPartitionedBasic() throws IOException {
}
assertEquals(3, count1);
- server.registerQuery("P2 = load '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlLoader();");
+ server.registerQuery("P2 = load '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlLoader();");
server.registerQuery("P2filter = filter P2 by bkt == '1';");
Iterator<Tuple> P2Iter = server.openIterator("P2filter");
int count2 = 0;
@@ -340,7 +340,7 @@ public void testProjectionsBasic() throws IOException {
// projections are handled by using generate, not "as" on the Load
- server.registerQuery("Y1 = load '"+BASIC_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlLoader();");
+ server.registerQuery("Y1 = load '"+BASIC_TABLE+"' using org.apache.howl.pig.HowlLoader();");
server.registerQuery("Y2 = foreach Y1 generate a;");
server.registerQuery("Y3 = foreach Y1 generate b,a;");
Schema dumpedY2Schema = server.dumpSchema("Y2");
16 howl/src/test/org/apache/howl/pig/TestHowlStorerMulti.java
@@ -57,8 +57,8 @@ private void createTable(String tablename, String schema, String partitionedBy)
if ((partitionedBy != null)&&(!partitionedBy.trim().isEmpty())){
createTable = createTable + "partitioned by ("+partitionedBy+") ";
}
- createTable = createTable + "stored as RCFILE tblproperties('howl.isd'='org.apache.hadoop.hive.howl.rcfile.RCFileInputDriver'," +
- "'howl.osd'='org.apache.hadoop.hive.howl.rcfile.RCFileOutputDriver') ";
+ createTable = createTable + "stored as RCFILE tblproperties('howl.isd'='org.apache.howl.rcfile.RCFileInputDriver'," +
+ "'howl.osd'='org.apache.howl.rcfile.RCFileOutputDriver') ";
int retCode = driver.run(createTable).getResponseCode();
if(retCode != 0) {
throw new IOException("Failed to create table. ["+createTable+"], return code from hive driver : ["+retCode+"]");
@@ -103,7 +103,7 @@ public void testStoreBasicTable() throws Exception {
UDFContext.getUDFContext().setClientSystemProps();
server.setBatchOn();
server.registerQuery("A = load '"+basicFileFullName+"' as (a:int, b:chararray);");
- server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer();");
+ server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.howl.pig.HowlStorer();");
server.executeBatch();
@@ -124,9 +124,9 @@ public void testStorePartitionedTable() throws Exception {
server.registerQuery("A = load '"+basicFileFullName+"' as (a:int, b:chararray);");
server.registerQuery("B2 = filter A by a < 2;");
- server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer('bkt=0');");
+ server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlStorer('bkt=0');");
server.registerQuery("C2 = filter A by a >= 2;");
- server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer('bkt=1');");
+ server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlStorer('bkt=1');");
server.executeBatch();
@@ -148,12 +148,12 @@ public void testStoreTableMulti() throws Exception {
UDFContext.getUDFContext().setClientSystemProps();
server.setBatchOn();
server.registerQuery("A = load '"+basicFileFullName+"' as (a:int, b:chararray);");
- server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer();");
+ server.registerQuery("store A into '"+BASIC_TABLE+"' using org.apache.howl.pig.HowlStorer();");
server.registerQuery("B2 = filter A by a < 2;");
- server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer('bkt=0');");
+ server.registerQuery("store B2 into '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlStorer('bkt=0');");
server.registerQuery("C2 = filter A by a >= 2;");
- server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.hadoop.hive.howl.pig.HowlStorer('bkt=1');");
+ server.registerQuery("store C2 into '"+PARTITIONED_TABLE+"' using org.apache.howl.pig.HowlStorer('bkt=1');");
server.executeBatch();
4 ivy/libraries.properties
@@ -22,10 +22,6 @@ ant-task.version=2.0.10
antlr.version=3.0.1
antlr-runtime.version=3.0.1
asm.version=3.1
-datanucleus-connectionpool.version=2.0.3
-datanucleus-core.version=2.0.3
-datanucleus-enhancer.version=2.0.3
-datanucleus-rdbms.version=2.0.3
checkstyle.version=5.0
commons-cli.version=2.0-SNAPSHOT
commons-codec.version=1.3
BIN  lib/datanucleus-connectionpool-2.0.3.jar
Binary file not shown
BIN  lib/datanucleus-core-2.0.5.jar
Binary file not shown
BIN  lib/datanucleus-enhancer-2.0.3.jar
Binary file not shown
BIN  lib/datanucleus-rdbms-2.0.5.jar
Binary file not shown
201 lib/datanucleus.LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
2  metastore/ivy.xml
@@ -25,6 +25,7 @@
<exclude org="org.apache.geronimo.specs" module="geronimo-jta_1.1_spec"/>
</dependency>
<dependency org="commons-pool" name="commons-pool" rev="${commons-pool.version}"/>
+<!--
<dependency org="org.datanucleus" name="datanucleus-connectionpool" rev="${datanucleus-connectionpool.version}">
<exclude module="proxool" />
<exclude module="c3p0" />
@@ -58,6 +59,7 @@
<exclude org="org.apache.ant" module="ant"/>
<exclude org="oracle" module="ojdbc14_g"/>
</dependency>
+-->
<dependency org="javax.jdo" name="jdo2-api" rev="${jdo-api.version}">
<exclude org="javax.transaction" module="jta"/>
<exclude org="org.apache.ant" module="ant"/>

No commit comments for this range
