Spark 3.5: Migrate tests to JUnit5 in actions directory (apache#9367)
chinmay-bhat authored and lisirrx committed Jan 4, 2024
1 parent 37a5dba commit 778765e
Showing 8 changed files with 1,030 additions and 997 deletions.
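The migration applies one mechanical pattern across these files: JUnit 4's @Rule TemporaryFolder, @Before, and Assert.* are replaced with JUnit 5's @TempDir and @BeforeEach, and assertions move to AssertJ's fluent assertThat / assertThatThrownBy. A minimal before/after sketch of the pattern (a hypothetical ExampleTest, not a class from this commit):

    // --- Before: JUnit 4 ---
    import java.io.File;
    import org.junit.Assert;
    import org.junit.Before;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;

    public class ExampleTest {
      @Rule public TemporaryFolder temp = new TemporaryFolder();
      private File tableDir;

      @Before
      public void setup() throws Exception {
        tableDir = temp.newFolder(); // TemporaryFolder creates the directory eagerly
      }

      @Test
      public void testDirExists() {
        Assert.assertTrue("table dir should exist", tableDir.exists());
      }
    }

    // --- After: JUnit 5 + AssertJ ---
    import static org.assertj.core.api.Assertions.assertThat;

    import java.io.File;
    import java.nio.file.Path;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    public class ExampleTest {
      // private @TempDir fields are accepted by recent JUnit 5 versions
      @TempDir private Path temp;
      private File tableDir;

      @BeforeEach
      public void setup() {
        // resolve() only builds a path; unlike newFolder(), nothing exists on
        // disk until the code under test (or mkdirs()) creates it
        tableDir = temp.resolve("junit").toFile();
      }

      @Test
      public void testDirExists() {
        tableDir.mkdirs();
        assertThat(tableDir).as("table dir should exist").exists();
      }
    }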

Large diffs are not rendered by default.

spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestDeleteReachableFilesAction.java
@@ -19,8 +19,11 @@
 package org.apache.iceberg.spark.actions;
 
 import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
 
 import java.io.File;
+import java.nio.file.Path;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
@@ -44,20 +47,16 @@
 import org.apache.iceberg.hadoop.HadoopTables;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
-import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
 import org.apache.iceberg.relocated.com.google.common.collect.Sets;
-import org.apache.iceberg.spark.SparkTestBase;
+import org.apache.iceberg.spark.TestBase;
 import org.apache.iceberg.types.Types;
-import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-public class TestDeleteReachableFilesAction extends SparkTestBase {
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+public class TestDeleteReachableFilesAction extends TestBase {
   private static final HadoopTables TABLES = new HadoopTables(new Configuration());
   private static final Schema SCHEMA =
       new Schema(
@@ -113,13 +112,13 @@ public class TestDeleteReachableFilesAction extends SparkTestBase {
           .withRecordCount(1)
           .build();
 
-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private Path temp;
 
   private Table table;
 
-  @Before
+  @BeforeEach
   public void setupTableLocation() throws Exception {
-    File tableDir = temp.newFolder();
+    File tableDir = temp.resolve("junit").toFile();
     String tableLocation = tableDir.toURI().toString();
     this.table = TABLES.create(SCHEMA, SPEC, Maps.newHashMap(), tableLocation);
     spark.conf().set("spark.sql.shuffle.partitions", SHUFFLE_PARTITIONS);
@@ -133,30 +132,29 @@ private void checkRemoveFilesResults(
       long expectedManifestListsDeleted,
       long expectedOtherFilesDeleted,
       DeleteReachableFiles.Result results) {
-    Assert.assertEquals(
-        "Incorrect number of manifest files deleted",
-        expectedManifestsDeleted,
-        results.deletedManifestsCount());
-    Assert.assertEquals(
-        "Incorrect number of datafiles deleted",
-        expectedDatafiles,
-        results.deletedDataFilesCount());
-    Assert.assertEquals(
-        "Incorrect number of position delete files deleted",
-        expectedPosDeleteFiles,
-        results.deletedPositionDeleteFilesCount());
-    Assert.assertEquals(
-        "Incorrect number of equality delete files deleted",
-        expectedEqDeleteFiles,
-        results.deletedEqualityDeleteFilesCount());
-    Assert.assertEquals(
-        "Incorrect number of manifest lists deleted",
-        expectedManifestListsDeleted,
-        results.deletedManifestListsCount());
-    Assert.assertEquals(
-        "Incorrect number of other lists deleted",
-        expectedOtherFilesDeleted,
-        results.deletedOtherFilesCount());
+    assertThat(results.deletedManifestsCount())
+        .as("Incorrect number of manifest files deleted")
+        .isEqualTo(expectedManifestsDeleted);
+
+    assertThat(results.deletedDataFilesCount())
+        .as("Incorrect number of datafiles deleted")
+        .isEqualTo(expectedDatafiles);
+
+    assertThat(results.deletedPositionDeleteFilesCount())
+        .as("Incorrect number of position delete files deleted")
+        .isEqualTo(expectedPosDeleteFiles);
+
+    assertThat(results.deletedEqualityDeleteFilesCount())
+        .as("Incorrect number of equality delete files deleted")
+        .isEqualTo(expectedEqDeleteFiles);
+
+    assertThat(results.deletedManifestListsCount())
+        .as("Incorrect number of manifest lists deleted")
+        .isEqualTo(expectedManifestListsDeleted);
+
+    assertThat(results.deletedOtherFilesCount())
+        .as("Incorrect number of other lists deleted")
+        .isEqualTo(expectedOtherFilesDeleted);
   }
 
   @Test
@@ -196,15 +194,17 @@ public void dataFilesCleanupWithParallelTasks() {
 
     // Verifies that the delete methods ran in the threads created by the provided ExecutorService
     // ThreadFactory
-    Assert.assertEquals(
-        deleteThreads,
-        Sets.newHashSet("remove-files-0", "remove-files-1", "remove-files-2", "remove-files-3"));
+    assertThat(deleteThreads)
+        .isEqualTo(
+            Sets.newHashSet(
+                "remove-files-0", "remove-files-1", "remove-files-2", "remove-files-3"));
 
     Lists.newArrayList(FILE_A, FILE_B, FILE_C, FILE_D)
         .forEach(
             file ->
-                Assert.assertTrue(
-                    "FILE_A should be deleted", deletedFiles.contains(FILE_A.path().toString())));
+                assertThat(deletedFiles)
+                    .as("FILE_A should be deleted")
+                    .contains(FILE_A.path().toString()));
     checkRemoveFilesResults(4L, 0, 0, 6L, 4L, 6, result);
   }
 
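For context on the thread-name assertion above: the test routes deletes through an ExecutorService whose ThreadFactory names each worker "remove-files-N", and a deleteWith callback records the name of the thread that performed each delete. A simplified sketch of that wiring, reusing the test's sparkActions() and metadataLocation() helpers (an illustrative excerpt, not the full test body):

    Set<String> deletedFiles = ConcurrentHashMap.newKeySet();
    Set<String> deleteThreads = ConcurrentHashMap.newKeySet();
    AtomicInteger deleteThreadsIndex = new AtomicInteger(0);

    DeleteReachableFiles.Result result =
        sparkActions()
            .deleteReachableFiles(metadataLocation(table))
            .io(table.io())
            .executeDeleteWith(
                Executors.newFixedThreadPool(
                    4, // pool size matches the four expected thread names
                    runnable -> {
                      Thread thread = new Thread(runnable);
                      thread.setName("remove-files-" + deleteThreadsIndex.getAndIncrement());
                      thread.setDaemon(true); // don't block JVM shutdown
                      return thread;
                    }))
            .deleteWith(
                file -> {
                  deleteThreads.add(Thread.currentThread().getName()); // record executing thread
                  deletedFiles.add(file);
                })
            .execute();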
@@ -329,10 +329,9 @@ public void testUseLocalIterator() {
 
           checkRemoveFilesResults(3L, 0, 0, 4L, 3L, 5, results);
 
-          Assert.assertEquals(
-              "Expected total jobs to be equal to total number of shuffle partitions",
-              totalJobsRun,
-              SHUFFLE_PARTITIONS);
+          assertThat(totalJobsRun)
+              .as("Expected total jobs to be equal to total number of shuffle partitions")
+              .isEqualTo(SHUFFLE_PARTITIONS);
         });
   }
 
@@ -345,11 +344,10 @@ public void testIgnoreMetadataFilesNotFound() {
     DeleteOrphanFiles.Result result =
         sparkActions().deleteOrphanFiles(table).olderThan(System.currentTimeMillis()).execute();
 
-    Assert.assertEquals("Should delete 1 file", 1, Iterables.size(result.orphanFileLocations()));
-    Assert.assertTrue(
-        "Should remove v1 file",
-        StreamSupport.stream(result.orphanFileLocations().spliterator(), false)
-            .anyMatch(file -> file.contains("v1.metadata.json")));
+    assertThat(result.orphanFileLocations()).as("Should delete 1 file").hasSize(1);
+    assertThat(StreamSupport.stream(result.orphanFileLocations().spliterator(), false))
+        .as("Should remove v1 file")
+        .anyMatch(file -> file.contains("v1.metadata.json"));
 
     DeleteReachableFiles baseRemoveFilesSparkAction =
         sparkActions().deleteReachableFiles(metadataLocation(table)).io(table.io());
@@ -363,7 +361,7 @@ public void testEmptyIOThrowsException() {
     DeleteReachableFiles baseRemoveFilesSparkAction =
         sparkActions().deleteReachableFiles(metadataLocation(table)).io(null);
 
-    Assertions.assertThatThrownBy(baseRemoveFilesSparkAction::execute)
+    assertThatThrownBy(baseRemoveFilesSparkAction::execute)
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("File IO cannot be null");
   }
@@ -372,8 +370,7 @@ public void testEmptyIOThrowsException() {
   public void testRemoveFilesActionWhenGarbageCollectionDisabled() {
     table.updateProperties().set(TableProperties.GC_ENABLED, "false").commit();
 
-    Assertions.assertThatThrownBy(
-            () -> sparkActions().deleteReachableFiles(metadataLocation(table)).execute())
+    assertThatThrownBy(() -> sparkActions().deleteReachableFiles(metadataLocation(table)).execute())
         .isInstanceOf(ValidationException.class)
         .hasMessage(
             "Cannot delete files: GC is disabled (deleting files may corrupt other tables)");