[HUDI-4696] Fix flaky TestHoodieCombineHiveInputFormat #6494

Merged 1 commit on Aug 25, 2022.
HdfsTestService.java (org.apache.hudi.common.testutils.minicluster)

@@ -18,7 +18,6 @@
 
 package org.apache.hudi.common.testutils.minicluster;
 
-import org.apache.hudi.common.testutils.HoodieTestUtils;
 import org.apache.hudi.common.testutils.NetworkTestUtils;
 import org.apache.hudi.common.util.FileIOUtils;
 
@@ -45,7 +44,7 @@ public class HdfsTestService {
   /**
    * Configuration settings.
    */
-  private Configuration hadoopConf;
+  private final Configuration hadoopConf;
   private final String workDir;
 
   /**
@@ -54,6 +53,7 @@
   private MiniDFSCluster miniDfsCluster;
 
   public HdfsTestService() throws IOException {
+    hadoopConf = new Configuration();
     workDir = Files.createTempDirectory("temp").toAbsolutePath().toString();
   }

@@ -63,7 +63,6 @@ public Configuration getHadoopConf() {
 
   public MiniDFSCluster start(boolean format) throws IOException {
     Objects.requireNonNull(workDir, "The work dir must be set before starting cluster.");
-    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
 
     // If clean, then remove the work dir so we can start fresh.
     String localDFSLocation = getDFSLocation(workDir);
@@ -107,7 +106,6 @@ public void stop() {
       miniDfsCluster.shutdown(true, true);
     }
     miniDfsCluster = null;
-    hadoopConf = null;
   }
 
   /**
@@ -123,9 +121,9 @@ private static String getDFSLocation(String baseFsLocation) {
   /**
    * Configure the DFS Cluster before launching it.
    *
-   * @param config The already created Hadoop configuration we'll further configure for HDFS
+   * @param config           The already created Hadoop configuration we'll further configure for HDFS
    * @param localDFSLocation The location on the local filesystem where cluster data is stored
-   * @param bindIP An IP address we want to force the datanode and namenode to bind to.
+   * @param bindIP           An IP address we want to force the datanode and namenode to bind to.
    * @return The updated Configuration object.
    */
   private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP,
@@ -146,7 +144,7 @@ private static Configuration configureDFSCluster(Configuration config, String lo
     String user = System.getProperty("user.name");
     config.set("hadoop.proxyuser." + user + ".groups", "*");
     config.set("hadoop.proxyuser." + user + ".hosts", "*");
-    config.setBoolean("dfs.permissions",false);
+    config.setBoolean("dfs.permissions", false);
     return config;
   }

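Net effect of the HdfsTestService changes: the Hadoop Configuration is now created exactly once, in the constructor, instead of being swapped for a fresh HoodieTestUtils.getDefaultHadoopConf() on every start() and nulled out on stop(). Code that caches the result of getHadoopConf() therefore keeps pointing at the configuration the MiniDFSCluster was actually built with, across restarts, which is a plausible source of the flakiness this PR targets. A minimal lifecycle sketch, assuming only the signatures visible in this diff; the driver class itself is hypothetical:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

import org.apache.hudi.common.testutils.minicluster.HdfsTestService;

// Hypothetical driver showing the post-fix lifecycle of HdfsTestService.
public class HdfsTestServiceLifecycleSketch {

  public static void main(String[] args) throws Exception {
    HdfsTestService service = new HdfsTestService();
    // Safe to cache now: hadoopConf is final, created in the constructor,
    // and start()/stop() no longer replace or null it.
    Configuration conf = service.getHadoopConf();

    MiniDFSCluster cluster = service.start(true);   // format = true: clean work dir, fresh cluster
    // Inspect a key the cluster setup is expected to populate (illustrative).
    System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));

    service.stop();                                 // shuts down the cluster, keeps the conf
    cluster = service.start(false);                 // restart against the same configuration
    service.stop();
  }
}
```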
TestHoodieCombineHiveInputFormat.java (moved from org.apache.hudi.hadoop.functional to org.apache.hudi.hadoop.hive)
@@ -16,10 +16,8 @@
  * limitations under the License.
  */
 
-package org.apache.hudi.hadoop.functional;
+package org.apache.hudi.hadoop.hive;
 
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hudi.avro.HoodieAvroUtils;
 import org.apache.hudi.common.model.HoodieCommitMetadata;
 import org.apache.hudi.common.model.HoodieTableType;
@@ -33,28 +31,27 @@
 import org.apache.hudi.common.testutils.minicluster.MiniClusterUtil;
 import org.apache.hudi.common.util.CommitUtils;
 import org.apache.hudi.common.util.Option;
-import org.apache.hudi.hadoop.hive.HoodieCombineHiveInputFormat;
-import org.apache.hudi.hadoop.hive.HoodieCombineRealtimeFileSplit;
-import org.apache.hudi.hadoop.hive.HoodieCombineRealtimeHiveSplit;
 import org.apache.hudi.hadoop.realtime.HoodieParquetRealtimeInputFormat;
 import org.apache.hudi.hadoop.testutils.InputFormatTestUtil;
 
 import org.apache.avro.Schema;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.FileSplit;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
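The test-side change is a package move plus import cleanup: TestHoodieCombineHiveInputFormat now lives in org.apache.hudi.hadoop.hive alongside HoodieCombineHiveInputFormat, HoodieCombineRealtimeFileSplit, and HoodieCombineRealtimeHiveSplit, so those three imports disappear, while hive_metastoreConstants, IOContextMap, and FileSplit are only re-sorted into their alphabetical positions. A hedged JUnit 5 skeleton of the mini-cluster setup a test like this relies on; HdfsTestService and its methods come from the diff above, every other name is illustrative:

```java
package org.apache.hudi.hadoop.hive; // same package as the input-format classes under test

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hudi.common.testutils.minicluster.HdfsTestService;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;

// Illustrative skeleton only: shows the shared-cluster lifecycle whose
// stability the HdfsTestService fix improves.
public class CombineHiveInputFormatTestSkeleton {

  private static HdfsTestService hdfsTestService;
  private static FileSystem fs;

  @BeforeAll
  public static void setUpClass() throws Exception {
    hdfsTestService = new HdfsTestService();
    hdfsTestService.start(true);                          // format and launch the mini HDFS
    Configuration conf = hdfsTestService.getHadoopConf();
    fs = FileSystem.get(conf);                            // same Configuration the cluster uses
  }

  @AfterAll
  public static void tearDownClass() {
    if (hdfsTestService != null) {
      hdfsTestService.stop();                             // conf survives for any restart
    }
  }
}
```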