From e1b665778d0eace4180f4c9c736e3619b52c53f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 6 May 2022 11:07:06 +0200 Subject: [PATCH 01/16] [FLINK-27185][connector] Convert connector-base module to assertj Co-authored-by: slinkydeveloper --- .../sink/writer/AsyncSinkWriterTestUtils.java | 14 +-- .../source/hybrid/HybridSourceITCase.java | 5 +- .../source/hybrid/HybridSourceReaderTest.java | 53 ++++++----- .../HybridSourceSplitEnumeratorTest.java | 84 +++++++++--------- .../HybridSourceSplitSerializerTest.java | 16 ++-- .../base/source/hybrid/HybridSourceTest.java | 13 ++- .../reader/CoordinatedSourceITCase.java | 8 +- .../CoordinatedSourceRescaleITCase.java | 29 +++---- .../source/reader/SourceMetricsITCase.java | 87 +++++++++++-------- .../fetcher/SplitFetcherManagerTest.java | 21 ++--- .../reader/fetcher/SplitFetcherTest.java | 43 +++++---- .../FutureCompletingBlockingQueueTest.java | 79 ++++++++--------- .../base/source/utils/SerdeUtilsTest.java | 4 +- 13 files changed, 221 insertions(+), 235 deletions(-) diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/AsyncSinkWriterTestUtils.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/AsyncSinkWriterTestUtils.java index b8e49b55b59f4..c64493371a19c 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/AsyncSinkWriterTestUtils.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/sink/writer/AsyncSinkWriterTestUtils.java @@ -24,7 +24,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Utils class for {@link AsyncSinkWriter} related test. */ public class AsyncSinkWriterTestUtils { @@ -46,21 +46,21 @@ public static BufferedRequestState getTestState( public static void assertThatBufferStatesAreEqual( BufferedRequestState actual, BufferedRequestState expected) { // Equal states must have equal sizes - assertEquals(actual.getStateSize(), expected.getStateSize()); + assertThat(actual.getStateSize()).isEqualTo(expected.getStateSize()); // Equal states must have the same number of requests. int actualLength = actual.getBufferedRequestEntries().size(); - assertEquals(actualLength, expected.getBufferedRequestEntries().size()); + assertThat(actualLength).isEqualTo(expected.getBufferedRequestEntries().size()); List> actualRequests = actual.getBufferedRequestEntries(); List> expectedRequests = expected.getBufferedRequestEntries(); // Equal states must have same requests in the same order. 
for (int i = 0; i < actualLength; i++) { - assertEquals( - actualRequests.get(i).getRequestEntry(), - expectedRequests.get(i).getRequestEntry()); - assertEquals(actualRequests.get(i).getSize(), expectedRequests.get(i).getSize()); + assertThat(actualRequests.get(i).getRequestEntry()) + .isEqualTo(expectedRequests.get(i).getRequestEntry()); + assertThat(actualRequests.get(i).getSize()) + .isEqualTo(expectedRequests.get(i).getSize()); } } } diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java index d5105144358c2..ae04ce6c9ae9f 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceITCase.java @@ -47,8 +47,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.hamcrest.Matchers.equalTo; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** MiniCluster-based integration test for the {@link HybridSource}. */ public class HybridSourceITCase extends TestLogger { @@ -205,7 +204,7 @@ private static void restartTaskManager(Runnable afterFailAction, MiniCluster min private static void verifyResult(List result) { Collections.sort(result); - assertThat(result, equalTo(EXPECTED_RESULT)); + assertThat(result).isEqualTo(EXPECTED_RESULT); } // ------------------------------------------------------------------------ diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceReaderTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceReaderTest.java index 031a7355613f7..b9c2f5661156f 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceReaderTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceReaderTest.java @@ -30,14 +30,14 @@ import org.apache.flink.core.io.InputStatus; import org.apache.flink.mock.Whitebox; -import org.hamcrest.Matchers; -import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; import java.util.Collections; import java.util.List; +import static org.assertj.core.api.Assertions.assertThat; + /** Tests for {@link HybridSourceReader}. 
*/ public class HybridSourceReaderTest { @@ -55,11 +55,11 @@ public void testReader() throws Exception { HybridSourceReader reader = new HybridSourceReader<>(readerContext); - Assert.assertThat(readerContext.getSentEvents(), Matchers.emptyIterable()); + assertThat(readerContext.getSentEvents()).isEmpty(); reader.start(); assertAndClearSourceReaderFinishedEvent(readerContext, -1); - Assert.assertNull(currentReader(reader)); - Assert.assertEquals(InputStatus.NOTHING_AVAILABLE, reader.pollNext(readerOutput)); + assertThat(currentReader(reader)).isNull(); + assertThat(reader.pollNext(readerOutput)).isEqualTo(InputStatus.NOTHING_AVAILABLE); Source source1 = new MockSource(null, 0) { @@ -85,19 +85,19 @@ public SourceReader createReader( status = reader.pollNext(readerOutput); Thread.sleep(10); } - Assert.assertThat(readerOutput.getEmittedRecords(), Matchers.contains(0)); + assertThat(readerOutput.getEmittedRecords()).contains(0); reader.pollNext(readerOutput); - Assert.assertEquals( - "before notifyNoMoreSplits", - InputStatus.NOTHING_AVAILABLE, - reader.pollNext(readerOutput)); + assertThat(reader.pollNext(readerOutput)) + .as("before notifyNoMoreSplits") + .isEqualTo(InputStatus.NOTHING_AVAILABLE); reader.notifyNoMoreSplits(); reader.pollNext(readerOutput); assertAndClearSourceReaderFinishedEvent(readerContext, 0); - Assert.assertEquals( - "reader before switch source event", mockSplitReader1, currentReader(reader)); + assertThat(currentReader(reader)) + .as("reader before switch source event") + .isEqualTo(mockSplitReader1); Source source2 = new MockSource(null, 0) { @@ -108,14 +108,14 @@ public SourceReader createReader( } }; reader.handleSourceEvents(new SwitchSourceEvent(1, source2, true)); - Assert.assertEquals( - "reader after switch source event", mockSplitReader2, currentReader(reader)); + assertThat(currentReader(reader)) + .as("reader after switch source event") + .isEqualTo(mockSplitReader2); reader.notifyNoMoreSplits(); - Assert.assertEquals( - "reader 1 after notifyNoMoreSplits", - InputStatus.END_OF_INPUT, - reader.pollNext(readerOutput)); + assertThat(reader.pollNext(readerOutput)) + .as("reader 1 after notifyNoMoreSplits") + .isEqualTo(InputStatus.END_OF_INPUT); reader.close(); } @@ -140,22 +140,22 @@ public void testReaderRecovery() throws Exception { reader.addSplits(Collections.singletonList(hybridSplit)); List snapshot = reader.snapshotState(0); - Assert.assertThat(snapshot, Matchers.contains(hybridSplit)); + assertThat(snapshot).contains(hybridSplit); // reader recovery readerContext.clearSentEvents(); reader = new HybridSourceReader<>(readerContext); reader.addSplits(snapshot); - Assert.assertNull(currentReader(reader)); + assertThat(currentReader(reader)).isNull(); reader.start(); - Assert.assertNull(currentReader(reader)); + assertThat(currentReader(reader)).isNull(); assertAndClearSourceReaderFinishedEvent(readerContext, -1); reader.handleSourceEvents(new SwitchSourceEvent(0, source, false)); - Assert.assertNotNull(currentReader(reader)); - Assert.assertThat(reader.snapshotState(1), Matchers.contains(hybridSplit)); + assertThat(currentReader(reader)).isNotNull(); + assertThat(reader.snapshotState(1)).contains(hybridSplit); reader.close(); } @@ -196,10 +196,9 @@ private static SourceReader currentReader( private static void assertAndClearSourceReaderFinishedEvent( TestingReaderContext context, int sourceIndex) { - Assert.assertThat(context.getSentEvents(), Matchers.iterableWithSize(1)); - Assert.assertEquals( - sourceIndex, - ((SourceReaderFinishedEvent) 
context.getSentEvents().get(0)).sourceIndex()); + assertThat(context.getSentEvents()).hasSize(1); + assertThat(((SourceReaderFinishedEvent) context.getSentEvents().get(0)).sourceIndex()) + .isEqualTo(sourceIndex); context.clearSentEvents(); } } diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java index 7bcf69c5e7240..ae8e5ef85a14f 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java @@ -29,8 +29,6 @@ import org.apache.flink.connector.base.source.reader.mocks.MockSplitEnumerator; import org.apache.flink.mock.Whitebox; -import org.hamcrest.Matchers; -import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -39,8 +37,8 @@ import java.util.Collections; import java.util.List; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests for {@link HybridSourceSplitEnumerator}. */ public class HybridSourceSplitEnumeratorTest { @@ -63,32 +61,31 @@ private void setupEnumeratorAndTriggerSourceSwitch() { enumerator.start(); // mock enumerator assigns splits once all readers are registered registerReader(context, enumerator, SUBTASK0); - assertThat(context.getSplitsAssignmentSequence(), Matchers.emptyIterable()); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); registerReader(context, enumerator, SUBTASK1); - assertThat(context.getSplitsAssignmentSequence(), Matchers.emptyIterable()); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); enumerator.handleSourceEvent(SUBTASK0, new SourceReaderFinishedEvent(-1)); - assertThat(context.getSplitsAssignmentSequence(), Matchers.iterableWithSize(0)); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); enumerator.handleSourceEvent(SUBTASK1, new SourceReaderFinishedEvent(-1)); - assertThat(context.getSplitsAssignmentSequence(), Matchers.iterableWithSize(1)); + assertThat(context.getSplitsAssignmentSequence()).hasSize(1); splitFromSource0 = context.getSplitsAssignmentSequence().get(0).assignment().get(SUBTASK0).get(0); - assertEquals(0, splitFromSource0.sourceIndex()); - assertEquals(0, getCurrentSourceIndex(enumerator)); + assertThat(splitFromSource0.sourceIndex()).isEqualTo(0); + assertThat(getCurrentSourceIndex(enumerator)).isEqualTo(0); // trigger source switch enumerator.handleSourceEvent(SUBTASK0, new SourceReaderFinishedEvent(0)); - assertEquals("one reader finished", 0, getCurrentSourceIndex(enumerator)); + assertThat(getCurrentSourceIndex(enumerator)).as("one reader finished").isEqualTo(0); enumerator.handleSourceEvent(SUBTASK1, new SourceReaderFinishedEvent(0)); - assertEquals("both readers finished", 1, getCurrentSourceIndex(enumerator)); - assertThat( - "switch triggers split assignment", - context.getSplitsAssignmentSequence(), - Matchers.iterableWithSize(2)); + assertThat(getCurrentSourceIndex(enumerator)).as("both readers finished").isEqualTo(1); + assertThat(context.getSplitsAssignmentSequence()) + .as("switch triggers split assignment") + .hasSize(2); splitFromSource1 = 
context.getSplitsAssignmentSequence().get(1).assignment().get(SUBTASK0).get(0); - assertEquals(1, splitFromSource1.sourceIndex()); + assertThat(splitFromSource1.sourceIndex()).isEqualTo(1); enumerator.handleSourceEvent(SUBTASK1, new SourceReaderFinishedEvent(SUBTASK1)); - assertEquals("reader without assignment", 1, getCurrentSourceIndex(enumerator)); + assertThat(getCurrentSourceIndex(enumerator)).as("reader without assignment").isEqualTo(1); } @Test @@ -99,7 +96,7 @@ public void testRegisterReaderAfterSwitchAndReaderReset() { context.getSplitsAssignmentSequence().clear(); enumerator.addReader(SUBTASK0); enumerator.addSplitsBack(Collections.singletonList(splitFromSource0), SUBTASK0); - assertThat(context.getSplitsAssignmentSequence(), Matchers.iterableWithSize(0)); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); enumerator.handleSourceEvent(SUBTASK0, new SourceReaderFinishedEvent(-1)); assertSplitAssignment( "addSplitsBack triggers assignment when reader registered", @@ -112,12 +109,11 @@ public void testRegisterReaderAfterSwitchAndReaderReset() { context.getSplitsAssignmentSequence().clear(); context.unregisterReader(SUBTASK0); enumerator.addSplitsBack(Collections.singletonList(splitFromSource0), SUBTASK0); - assertThat( - "addSplitsBack doesn't trigger assignment when reader not registered", - context.getSplitsAssignmentSequence(), - Matchers.emptyIterable()); + assertThat(context.getSplitsAssignmentSequence()) + .as("addSplitsBack doesn't trigger assignment when reader not registered") + .isEmpty(); registerReader(context, enumerator, SUBTASK0); - assertThat(context.getSplitsAssignmentSequence(), Matchers.iterableWithSize(0)); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); enumerator.handleSourceEvent(SUBTASK0, new SourceReaderFinishedEvent(-1)); assertSplitAssignment( "registerReader triggers assignment", context, 1, splitFromSource0, SUBTASK0); @@ -134,13 +130,13 @@ public void testHandleSplitRequestAfterSwitchAndReaderReset() { List mockSourceSplits = (List) Whitebox.getInternalState(underlyingEnumeratorWrapper.enumerator, "splits"); - assertThat(mockSourceSplits, Matchers.emptyIterable()); + assertThat(mockSourceSplits).isEmpty(); // simulate reader reset to before switch by adding split of previous source back context.getSplitsAssignmentSequence().clear(); - assertEquals("current enumerator", 1, getCurrentSourceIndex(enumerator)); + assertThat(getCurrentSourceIndex(enumerator)).as("current enumerator").isEqualTo(1); - assertThat(underlyingEnumeratorWrapper.handleSplitRequests, Matchers.emptyIterable()); + assertThat(underlyingEnumeratorWrapper.handleSplitRequests).isEmpty(); enumerator.handleSplitRequest(SUBTASK0, "fakehostname"); SwitchedSources switchedSources = new SwitchedSources(); @@ -158,7 +154,7 @@ public void testHandleSplitRequestAfterSwitchAndReaderReset() { enumerator.addSplitsBack(Collections.singletonList(splitFromSource0), SUBTASK0); try { enumerator.handleSplitRequest(SUBTASK0, "fakehostname"); - Assert.fail("expected exception"); + fail("expected exception"); } catch (IllegalStateException ex) { } } @@ -170,16 +166,18 @@ public void testRestoreEnumerator() throws Exception { enumerator.start(); HybridSourceEnumeratorState enumeratorState = enumerator.snapshotState(0); MockSplitEnumerator underlyingEnumerator = getCurrentEnumerator(enumerator); - Assert.assertThat( - (List) Whitebox.getInternalState(underlyingEnumerator, "splits"), - Matchers.iterableWithSize(1)); + assertThat( + (List) + 
Whitebox.getInternalState(underlyingEnumerator, "splits")) + .hasSize(1); enumerator = (HybridSourceSplitEnumerator) source.restoreEnumerator(context, enumeratorState); enumerator.start(); underlyingEnumerator = getCurrentEnumerator(enumerator); - Assert.assertThat( - (List) Whitebox.getInternalState(underlyingEnumerator, "splits"), - Matchers.iterableWithSize(1)); + assertThat( + (List) + Whitebox.getInternalState(underlyingEnumerator, "splits")) + .hasSize(1); } @Test @@ -251,15 +249,15 @@ private static void assertSplitAssignment( int size, HybridSourceSplit split, int subtask) { - assertThat(reason, context.getSplitsAssignmentSequence(), Matchers.iterableWithSize(size)); - assertEquals( - reason, - split, - context.getSplitsAssignmentSequence() - .get(size - 1) - .assignment() - .get(subtask) - .get(0)); + assertThat(context.getSplitsAssignmentSequence()).as(reason).hasSize(size); + assertThat( + context.getSplitsAssignmentSequence() + .get(size - 1) + .assignment() + .get(subtask) + .get(0)) + .as(reason) + .isEqualTo(split); } private static void registerReader( diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitSerializerTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitSerializerTest.java index e2db86ed69c04..2405183256ac2 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitSerializerTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitSerializerTest.java @@ -21,13 +21,15 @@ import org.apache.flink.api.connector.source.Source; import org.apache.flink.api.connector.source.mocks.MockSource; -import org.junit.Assert; import org.junit.Test; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + /** Tests for {@link HybridSourceSplitSerializer}. 
*/ public class HybridSourceSplitSerializerTest { @@ -40,13 +42,9 @@ public void testSerialization() throws Exception { HybridSourceSplit split = new HybridSourceSplit(0, splitBytes, 0, "splitId"); byte[] serialized = serializer.serialize(split); HybridSourceSplit clonedSplit = serializer.deserialize(0, serialized); - Assert.assertEquals(split, clonedSplit); - - try { - serializer.deserialize(1, serialized); - Assert.fail(); - } catch (IOException e) { - // expected invalid version - } + assertThat(clonedSplit).isEqualTo(split); + + assertThatThrownBy(() -> serializer.deserialize(1, serialized)) + .isInstanceOf(IOException.class); } } diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java index 786677b2f9015..c9145182fef7b 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java @@ -29,9 +29,8 @@ import java.util.List; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests for {@link HybridSource}. */ public class HybridSourceTest { @@ -44,13 +43,13 @@ public void testBoundedness() { HybridSource.builder(new MockBaseSource(1, 1, Boundedness.BOUNDED)) .addSource(new MockBaseSource(1, 1, Boundedness.BOUNDED)) .build(); - assertEquals(Boundedness.BOUNDED, source.getBoundedness()); + assertThat(source.getBoundedness()).isEqualTo(Boundedness.BOUNDED); source = HybridSource.builder(new MockBaseSource(1, 1, Boundedness.BOUNDED)) .addSource(new MockBaseSource(1, 1, Boundedness.CONTINUOUS_UNBOUNDED)) .build(); - assertEquals(Boundedness.CONTINUOUS_UNBOUNDED, source.getBoundedness()); + assertThat(source.getBoundedness()).isEqualTo(Boundedness.CONTINUOUS_UNBOUNDED); try { HybridSource.builder(new MockBaseSource(1, 1, Boundedness.CONTINUOUS_UNBOUNDED)) @@ -82,7 +81,7 @@ public void testBuilderWithSourceFactory() { new MockBaseSource(1, 1, Boundedness.BOUNDED)) .addSource(sourceFactory, Boundedness.BOUNDED) .build(); - assertNotNull(source); + assertThat(source).isNotNull(); } private static class ExtendedMockSplitEnumerator extends MockSplitEnumerator { @@ -110,6 +109,6 @@ public void testBuilderWithEnumeratorSuperclass() { new MockBaseSource(1, 1, Boundedness.BOUNDED)) .addSource(sourceFactory, Boundedness.BOUNDED) .build(); - assertNotNull(source); + assertThat(source).isNotNull(); } } diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceITCase.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceITCase.java index 4c23973e712d5..438ff218e6315 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceITCase.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceITCase.java @@ -43,7 +43,7 @@ import java.util.Collections; import java.util.List; -import static org.junit.Assert.assertEquals; +import static 
org.assertj.core.api.Assertions.assertThat; /** IT case for the {@link Source} with a coordinator. */ public class CoordinatedSourceITCase extends AbstractTestBase { @@ -116,9 +116,9 @@ public void invoke(Integer value, Context context) throws Exception { }); List result = env.execute().getAccumulatorResult("result"); Collections.sort(result); - assertEquals(numRecords, result.size()); - assertEquals(0, (int) result.get(0)); - assertEquals(numRecords - 1, (int) result.get(result.size() - 1)); + assertThat(result).hasSize(numRecords); + assertThat((int) result.get(0)).isEqualTo(0); + assertThat((int) result.get(result.size() - 1)).isEqualTo(numRecords - 1); } // ------------------------------------------------------------------------ diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceRescaleITCase.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceRescaleITCase.java index 3a23ba8681cc3..db3a3fe81561c 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceRescaleITCase.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/CoordinatedSourceRescaleITCase.java @@ -24,7 +24,6 @@ import org.apache.flink.configuration.Configuration; import org.apache.flink.configuration.MemorySize; import org.apache.flink.configuration.TaskManagerOptions; -import org.apache.flink.core.testutils.FlinkMatchers; import org.apache.flink.runtime.jobgraph.SavepointConfigOptions; import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration; import org.apache.flink.streaming.api.datastream.DataStream; @@ -48,9 +47,10 @@ import java.nio.file.attribute.BasicFileAttributes; import java.util.Comparator; +import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches; import static org.apache.flink.runtime.state.filesystem.AbstractFsCheckpointStorageAccess.CHECKPOINT_DIR_PREFIX; import static org.apache.flink.runtime.state.filesystem.AbstractFsCheckpointStorageAccess.METADATA_FILE_NAME; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests if the coordinator handles up and downscaling. 
*/ public class CoordinatedSourceRescaleITCase extends TestLogger { @@ -85,16 +85,13 @@ public void testUpscaling() throws Exception { private File generateCheckpoint(File checkpointDir, int p) throws IOException { final StreamExecutionEnvironment env = createEnv(checkpointDir, null, p); - try { - env.execute("create checkpoint"); - throw new AssertionError("No checkpoint"); - } catch (Exception e) { - assertThat(e, FlinkMatchers.containsMessage(CREATED_CHECKPOINT)); - return Files.find(checkpointDir.toPath(), 2, this::isCompletedCheckpoint) - .max(Comparator.comparing(Path::toString)) - .map(Path::toFile) - .orElseThrow(() -> new IllegalStateException("Cannot generate checkpoint", e)); - } + assertThatThrownBy(() -> env.execute("create checkpoint")) + .satisfies(anyCauseMatches(CREATED_CHECKPOINT)); + + return Files.find(checkpointDir.toPath(), 2, this::isCompletedCheckpoint) + .max(Comparator.comparing(Path::toString)) + .map(Path::toFile) + .orElseThrow(() -> new IllegalStateException("Cannot generate checkpoint")); } private boolean isCompletedCheckpoint(Path path, BasicFileAttributes attr) { @@ -106,12 +103,8 @@ private boolean isCompletedCheckpoint(Path path, BasicFileAttributes attr) { private void resumeCheckpoint(File checkpointDir, File restoreCheckpoint, int p) { final StreamExecutionEnvironment env = createEnv(checkpointDir, restoreCheckpoint, p); - try { - env.execute("resume checkpoint"); - throw new AssertionError("No success error"); - } catch (Exception e) { - assertThat(e, FlinkMatchers.containsMessage(RESTORED_CHECKPOINT)); - } + assertThatThrownBy(() -> env.execute("resume checkpoint")) + .satisfies(anyCauseMatches(RESTORED_CHECKPOINT)); } private StreamExecutionEnvironment createEnv( diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java index d298a263c7afd..3847ffc98dc52 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java @@ -56,13 +56,12 @@ import static org.apache.flink.metrics.testutils.MetricMatchers.isCounter; import static org.apache.flink.metrics.testutils.MetricMatchers.isGauge; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThan; -import static org.junit.Assert.assertThat; /** Tests whether all provided metrics of a {@link Source} are of the expected values (FLIP-33). 
*/ public class SourceMetricsITCase extends TestLogger { @@ -169,7 +168,7 @@ private void assertSourceMetrics( boolean hasTimestamps) { List groups = reporter.findOperatorMetricGroups(jobId, "MetricTestingSource"); - assertThat(groups, hasSize(parallelism)); + assertThat(groups).hasSize(parallelism); int subtaskWithMetrics = 0; for (OperatorMetricGroup group : groups) { @@ -177,37 +176,39 @@ private void assertSourceMetrics( // there are only 2 splits assigned; so two groups will not update metrics if (group.getIOMetricGroup().getNumRecordsInCounter().getCount() == 0) { // assert that optional metrics are not initialized when no split assigned - assertThat( - metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), - isGauge(equalTo(InternalSourceReaderMetricGroup.UNDEFINED))); - assertThat(metrics.get(MetricNames.WATERMARK_LAG), nullValue()); + assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) + .satisfies( + matching( + isGauge( + equalTo( + InternalSourceReaderMetricGroup + .UNDEFINED)))); + assertThat(metrics.get(MetricNames.WATERMARK_LAG)).isNull(); continue; } subtaskWithMetrics++; // I/O metrics - assertThat( - group.getIOMetricGroup().getNumRecordsInCounter(), - isCounter(equalTo(processedRecordsPerSubtask))); - assertThat( - group.getIOMetricGroup().getNumBytesInCounter(), - isCounter( - equalTo( - processedRecordsPerSubtask - * MockRecordEmitter.RECORD_SIZE_IN_BYTES))); + assertThat(group.getIOMetricGroup().getNumRecordsInCounter()) + .satisfies(matching(isCounter(equalTo(processedRecordsPerSubtask)))); + assertThat(group.getIOMetricGroup().getNumBytesInCounter()) + .satisfies( + matching( + isCounter( + equalTo( + processedRecordsPerSubtask + * MockRecordEmitter + .RECORD_SIZE_IN_BYTES)))); // MockRecordEmitter is just incrementing errors every even record - assertThat( - metrics.get(MetricNames.NUM_RECORDS_IN_ERRORS), - isCounter(equalTo(processedRecordsPerSubtask / 2))); + assertThat(metrics.get(MetricNames.NUM_RECORDS_IN_ERRORS)) + .satisfies(matching(isCounter(equalTo(processedRecordsPerSubtask / 2)))); if (hasTimestamps) { // Timestamp assigner subtracting EVENTTIME_LAG from wall clock - assertThat( - metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), - isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON))); + assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) + .satisfies(matching(isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)))); // Watermark is derived from timestamp, so it has to be in the same order of // magnitude - assertThat( - metrics.get(MetricNames.WATERMARK_LAG), - isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON))); + assertThat(metrics.get(MetricNames.WATERMARK_LAG)) + .satisfies(matching(isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)))); // Calculate the additional watermark lag (on top of event time lag) Long watermarkLag = ((Gauge) metrics.get(MetricNames.WATERMARK_LAG)).getValue() @@ -216,24 +217,36 @@ private void assertSourceMetrics( MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) .getValue(); // That should correspond to the out-of-order boundedness - assertThat(watermarkLag, isCloseTo(WATERMARK_LAG, WATERMARK_EPSILON)); + assertThat(watermarkLag) + .satisfies(matching(isCloseTo(WATERMARK_LAG, WATERMARK_EPSILON))); } else { // assert that optional metrics are not initialized when no timestamp assigned - assertThat( - metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG), - isGauge(equalTo(InternalSourceReaderMetricGroup.UNDEFINED))); - assertThat(metrics.get(MetricNames.WATERMARK_LAG), nullValue()); + 
assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) + .satisfies( + matching( + isGauge( + equalTo( + InternalSourceReaderMetricGroup + .UNDEFINED)))); + assertThat(metrics.get(MetricNames.WATERMARK_LAG)).isNull(); } long pendingRecords = numTotalPerSubtask - processedRecordsPerSubtask; - assertThat(metrics.get(MetricNames.PENDING_RECORDS), isGauge(equalTo(pendingRecords))); - assertThat( - metrics.get(MetricNames.PENDING_BYTES), - isGauge(equalTo(pendingRecords * MockRecordEmitter.RECORD_SIZE_IN_BYTES))); + assertThat(metrics.get(MetricNames.PENDING_RECORDS)) + .satisfies(matching(isGauge(equalTo(pendingRecords)))); + assertThat(metrics.get(MetricNames.PENDING_BYTES)) + .satisfies( + matching( + isGauge( + equalTo( + pendingRecords + * MockRecordEmitter + .RECORD_SIZE_IN_BYTES)))); // test is keeping source idle time metric busy with the barrier - assertThat(metrics.get(MetricNames.SOURCE_IDLE_TIME), isGauge(equalTo(0L))); + assertThat(metrics.get(MetricNames.SOURCE_IDLE_TIME)) + .satisfies(matching(isGauge(equalTo(0L)))); } - assertThat(subtaskWithMetrics, equalTo(numSplits)); + assertThat(subtaskWithMetrics).isEqualTo(numSplits); } private Matcher isCloseTo(long value, long epsilon) { diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManagerTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManagerTest.java index 00fe22c8ba67a..1874c2e9a4382 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManagerTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherManagerTest.java @@ -28,7 +28,6 @@ import org.apache.flink.connector.base.source.reader.synchronization.FutureCompletingBlockingQueue; import org.apache.flink.core.testutils.OneShotLatch; -import org.apache.commons.lang3.exception.ExceptionUtils; import org.junit.Test; import java.io.IOException; @@ -37,10 +36,9 @@ import java.util.Collections; import java.util.Queue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; /** Unit tests for the {@link SplitFetcherManager}. 
*/ public class SplitFetcherManagerTest { @@ -64,13 +62,8 @@ public void testCloseFetcherWithException() throws Exception { SplitFetcherManager fetcherManager = createFetcher("test-split", new FutureCompletingBlockingQueue<>(), reader); fetcherManager.close(1000L); - try { - fetcherManager.checkErrors(); - } catch (Exception e) { - assertEquals( - "Artificial exception on closing the split reader.", - ExceptionUtils.getRootCause(e).getMessage()); - } + assertThatThrownBy(fetcherManager::checkErrors) + .hasRootCauseMessage("Artificial exception on closing the split reader."); } // the final modifier is important so that '@SafeVarargs' is accepted on Java 8 @@ -90,7 +83,7 @@ private final void testExceptionPropagation( reader.awaitAllRecordsReturned(); drainQueue(queue); - assertFalse(queue.getAvailabilityFuture().isDone()); + assertThat(queue.getAvailabilityFuture().isDone()).isFalse(); reader.triggerThrowException(); // await the error propagation @@ -100,7 +93,7 @@ private final void testExceptionPropagation( fetcher.checkErrors(); fail("expected exception"); } catch (Exception e) { - assertSame(testingException, e.getCause().getCause()); + assertThat(e.getCause().getCause()).isSameAs(testingException); } finally { fetcher.close(20_000L); } diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java index 8d9c99d79ae83..00f119662492a 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java @@ -40,9 +40,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Unit test for {@link SplitFetcher}. 
*/ public class SplitFetcherTest { @@ -51,7 +49,7 @@ public class SplitFetcherTest { public void testNewFetcherIsIdle() { final SplitFetcher fetcher = createFetcher(new TestingSplitReader<>()); - assertTrue(fetcher.isIdle()); + assertThat(fetcher.isIdle()).isTrue(); } @Test @@ -62,12 +60,12 @@ public void testFetcherNotIdleAfterSplitAdded() { fetcher.addSplits(Collections.singletonList(split)); - assertFalse(fetcher.isIdle()); + assertThat(fetcher.isIdle()).isFalse(); // need to loop here because the internal wakeup flag handling means we need multiple loops while (fetcher.assignedSplits().isEmpty()) { fetcher.runOnce(); - assertFalse(fetcher.isIdle()); + assertThat(fetcher.isIdle()).isFalse(); } } @@ -79,8 +77,8 @@ public void testIdleAfterFinishedSplitsEnqueued() { fetcher.runOnce(); - assertTrue(fetcher.assignedSplits().isEmpty()); - assertTrue(fetcher.isIdle()); + assertThat(fetcher.assignedSplits()).isEmpty(); + assertThat(fetcher.isIdle()).isTrue(); } @Test @@ -95,9 +93,9 @@ public void testNotifiesWhenGoingIdle() { fetcher.runOnce(); - assertTrue(fetcher.assignedSplits().isEmpty()); - assertTrue(fetcher.isIdle()); - assertTrue(queue.getAvailabilityFuture().isDone()); + assertThat(fetcher.assignedSplits()).isEmpty(); + assertThat(fetcher.isIdle()).isTrue(); + assertThat(queue.getAvailabilityFuture().isDone()).isTrue(); } @Test @@ -114,9 +112,9 @@ public void testNotifiesOlderFutureWhenGoingIdle() { fetcher.runOnce(); - assertTrue(fetcher.assignedSplits().isEmpty()); - assertTrue(fetcher.isIdle()); - assertTrue(future.isDone()); + assertThat(fetcher.assignedSplits()).isEmpty(); + assertThat(fetcher.isIdle()).isTrue(); + assertThat(future.isDone()).isTrue(); } @Test @@ -140,7 +138,8 @@ public void testNotifiesWhenGoingIdleConcurrent() throws Exception { // (thread finished) // or the fetcher was already idle when the thread drained the queue (then we need no // additional notification) - assertTrue(queue.getAvailabilityFuture().isDone() || queueDrainer.wasIdleWhenFinished()); + assertThat(queue.getAvailabilityFuture().isDone() || queueDrainer.wasIdleWhenFinished()) + .isTrue(); } @Test @@ -159,7 +158,7 @@ public void testNotifiesOlderFutureWhenGoingIdleConcurrent() throws Exception { final CompletableFuture future = queue.getAvailabilityFuture(); fetcher.runOnce(); - assertTrue(future.isDone()); + assertThat(future.isDone()).isTrue(); queueDrainer.sync(); } @@ -230,15 +229,15 @@ public void run() { while (nextBatch.nextSplit() != null) { int[] arr; while ((arr = nextBatch.nextRecordFromSplit()) != null) { - assertTrue(recordsRead.add(arr[0])); + assertThat(recordsRead.add(arr[0])).isTrue(); } } } - assertEquals(numTotalRecords, recordsRead.size()); - assertEquals(0, (int) recordsRead.first()); - assertEquals(numTotalRecords - 1, (int) recordsRead.last()); - assertTrue(wakeupTimes.get() > 0); + assertThat(recordsRead).hasSize(numTotalRecords); + assertThat((int) recordsRead.first()).isEqualTo(0); + assertThat((int) recordsRead.last()).isEqualTo(numTotalRecords - 1); + assertThat(wakeupTimes.get()).isGreaterThan(0); } finally { stop.set(true); fetcher.shutdown(); @@ -253,7 +252,7 @@ public void testClose() { final SplitFetcher fetcher = createFetcher(splitReader); fetcher.shutdown(); fetcher.run(); - assertTrue(splitReader.isClosed()); + assertThat(splitReader.isClosed()).isTrue(); } // ------------------------------------------------------------------------ diff --git 
a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueueTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueueTest.java index d90f4be68151b..57b4e0dba0d5f 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueueTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/synchronization/FutureCompletingBlockingQueueTest.java @@ -30,13 +30,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** The unit test for {@link FutureCompletingBlockingQueue}. */ public class FutureCompletingBlockingQueueTest { @@ -48,22 +43,22 @@ public void testBasics() throws InterruptedException { FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(5); CompletableFuture future = queue.getAvailabilityFuture(); - assertTrue(queue.isEmpty()); - assertEquals(0, queue.size()); + assertThat(queue.isEmpty()).isTrue(); + assertThat(queue.size()).isEqualTo(0); queue.put(0, 1234); - assertTrue(future.isDone()); - assertEquals(1, queue.size()); - assertFalse(queue.isEmpty()); - assertEquals(4, queue.remainingCapacity()); - assertNotNull(queue.peek()); - assertEquals(1234, (int) queue.peek()); - assertEquals(1234, (int) queue.poll()); - - assertEquals(0, queue.size()); - assertTrue(queue.isEmpty()); - assertEquals(5, queue.remainingCapacity()); + assertThat(future.isDone()).isTrue(); + assertThat(queue.size()).isEqualTo(1); + assertThat(queue.isEmpty()).isFalse(); + assertThat(queue.remainingCapacity()).isEqualTo(4); + assertThat(queue.peek()).isNotNull(); + assertThat((int) queue.peek()).isEqualTo(1234); + assertThat((int) queue.poll()).isEqualTo(1234); + + assertThat(queue.size()).isEqualTo(0); + assertThat(queue.isEmpty()).isTrue(); + assertThat(queue.remainingCapacity()).isEqualTo(5); } @Test @@ -71,8 +66,8 @@ public void testPoll() throws InterruptedException { FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); queue.put(0, 1234); Integer value = queue.poll(); - assertNotNull(value); - assertEquals(1234, (int) value); + assertThat(value).isNotNull(); + assertThat((int) value).isEqualTo(1234); } @Test @@ -80,9 +75,9 @@ public void testPollEmptyQueue() throws InterruptedException { FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); queue.put(0, 1234); - assertNotNull(queue.poll()); - assertNull(queue.poll()); - assertNull(queue.poll()); + assertThat(queue.poll()).isNotNull(); + assertThat(queue.poll()).isNull(); + assertThat(queue.poll()).isNull(); } @Test @@ -93,8 +88,8 @@ public void testWakeUpPut() throws InterruptedException { new Thread( () -> { try { - assertTrue(queue.put(0, 1234)); - assertFalse(queue.put(0, 1234)); + assertThat(queue.put(0, 1234)).isTrue(); + assertThat(queue.put(0, 1234)).isFalse(); latch.countDown(); } catch 
(InterruptedException e) { fail("Interrupted unexpectedly."); @@ -104,7 +99,7 @@ public void testWakeUpPut() throws InterruptedException { queue.wakeUpPuttingThread(0); latch.await(); - assertEquals(0, latch.getCount()); + assertThat(latch.getCount()).isEqualTo(0); } @Test @@ -165,29 +160,28 @@ public void testSpecifiedQueueCapacity() { final int capacity = 8_000; final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(capacity); - assertEquals(capacity, queue.remainingCapacity()); + assertThat(queue.remainingCapacity()).isEqualTo(capacity); } @Test public void testQueueDefaultCapacity() { final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); - assertEquals(DEFAULT_CAPACITY, queue.remainingCapacity()); - assertEquals( - DEFAULT_CAPACITY, - SourceReaderOptions.ELEMENT_QUEUE_CAPACITY.defaultValue().intValue()); + assertThat(queue.remainingCapacity()).isEqualTo(DEFAULT_CAPACITY); + assertThat(SourceReaderOptions.ELEMENT_QUEUE_CAPACITY.defaultValue().intValue()) + .isEqualTo(DEFAULT_CAPACITY); } @Test public void testUnavailableWhenEmpty() { final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); - assertFalse(queue.getAvailabilityFuture().isDone()); + assertThat(queue.getAvailabilityFuture().isDone()).isFalse(); } @Test public void testImmediatelyAvailableAfterPut() throws InterruptedException { final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); queue.put(0, new Object()); - assertTrue(queue.getAvailabilityFuture().isDone()); + assertThat(queue.getAvailabilityFuture().isDone()).isTrue(); } @Test @@ -195,7 +189,7 @@ public void testFutureBecomesAvailableAfterPut() throws InterruptedException { final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); final CompletableFuture future = queue.getAvailabilityFuture(); queue.put(0, new Object()); - assertTrue(future.isDone()); + assertThat(future.isDone()).isTrue(); } @Test @@ -203,14 +197,14 @@ public void testUnavailableWhenBecomesEmpty() throws InterruptedException { final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); queue.put(0, new Object()); queue.poll(); - assertFalse(queue.getAvailabilityFuture().isDone()); + assertThat(queue.getAvailabilityFuture().isDone()).isFalse(); } @Test public void testAvailableAfterNotifyAvailable() throws InterruptedException { final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); queue.notifyAvailable(); - assertTrue(queue.getAvailabilityFuture().isDone()); + assertThat(queue.getAvailabilityFuture().isDone()).isTrue(); } @Test @@ -218,7 +212,7 @@ public void testFutureBecomesAvailableAfterNotifyAvailable() throws InterruptedE final FutureCompletingBlockingQueue queue = new FutureCompletingBlockingQueue<>(); final CompletableFuture future = queue.getAvailabilityFuture(); queue.notifyAvailable(); - assertTrue(future.isDone()); + assertThat(future.isDone()).isTrue(); } @Test @@ -230,8 +224,8 @@ public void testPollResetsAvailability() throws InterruptedException { queue.poll(); final CompletableFuture afterPoll = queue.getAvailabilityFuture(); - assertTrue(beforePoll.isDone()); - assertFalse(afterPoll.isDone()); + assertThat(beforePoll.isDone()).isTrue(); + assertThat(afterPoll.isDone()).isFalse(); } /** @@ -241,6 +235,7 @@ public void testPollResetsAvailability() throws InterruptedException { */ @Test public void testQueueUsesShortCircuitFuture() { - assertSame(AvailabilityProvider.AVAILABLE, 
FutureCompletingBlockingQueue.AVAILABLE); + assertThat(FutureCompletingBlockingQueue.AVAILABLE) + .isSameAs(AvailabilityProvider.AVAILABLE); } } diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/utils/SerdeUtilsTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/utils/SerdeUtilsTest.java index e94062643f833..12594868af8a0 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/utils/SerdeUtilsTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/utils/SerdeUtilsTest.java @@ -30,7 +30,7 @@ import java.util.Map; import java.util.Set; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link SerdeUtils}. */ public class SerdeUtilsTest { @@ -65,7 +65,7 @@ public void testSerdeSplitAssignments() throws IOException { new TestingSourceSplitSerializer(), HashSet::new); - assertEquals(splitAssignments, deseredSplitAssignments); + assertThat(deseredSplitAssignments).isEqualTo(splitAssignments); } private static class TestingSourceSplitSerializer From 425d1c49374c3f590fe669ac9b4a348487f61eca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 6 May 2022 11:07:53 +0200 Subject: [PATCH 02/16] [FLINK-27185][connector] Convert connector-cassandra module to assertj Co-authored-by: slinkydeveloper --- .../cassandra/CassandraConnectorITCase.java | 98 +++++++++---------- .../cassandra/CassandraSinkBaseTest.java | 86 ++++++++-------- .../CassandraTupleWriteAheadSinkTest.java | 4 +- 3 files changed, 89 insertions(+), 99 deletions(-) diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java index de14e861ccb20..57fc14c50d8a2 100644 --- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java +++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java @@ -63,7 +63,6 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -93,9 +92,9 @@ import scala.collection.JavaConverters; import scala.collection.Seq; -import static org.hamcrest.MatcherAssert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.samePropertyValuesAs; -import static org.junit.Assert.assertTrue; /** IT cases for all cassandra sinks. 
*/ @SuppressWarnings("serial") @@ -412,7 +411,7 @@ public void testAnnotatePojoWithTable() { final Class annotatedPojoClass = annotatePojoWithTable(KEYSPACE, tableName); final Table pojoTableAnnotation = annotatedPojoClass.getAnnotation(Table.class); - assertTrue(pojoTableAnnotation.name().contains(tableName)); + assertThat(pojoTableAnnotation.name().contains(tableName)).isTrue(); } @Test @@ -425,9 +424,9 @@ public void testRaiseCassandraRequestsTimeouts() throws IOException { "/etc/cassandra/cassandra.yaml", configurationPath.toAbsolutePath().toString()); final String configuration = new String(Files.readAllBytes(configurationPath), StandardCharsets.UTF_8); - assertTrue(configuration.contains("request_timeout_in_ms: 30000")); - assertTrue(configuration.contains("read_request_timeout_in_ms: 15000")); - assertTrue(configuration.contains("write_request_timeout_in_ms: 6000")); + assertThat(configuration).contains("request_timeout_in_ms: 30000"); + assertThat(configuration).contains("read_request_timeout_in_ms: 15000"); + assertThat(configuration).contains("write_request_timeout_in_ms: 6000"); } // ------------------------------------------------------------------------ @@ -468,9 +467,9 @@ protected void verifyResultsIdealCircumstances( for (com.datastax.driver.core.Row s : result) { list.remove(new Integer(s.getInt(TUPLE_COUNTER_FIELD))); } - Assert.assertTrue( - "The following ID's were not found in the ResultSet: " + list.toString(), - list.isEmpty()); + assertThat(list) + .as("The following ID's were not found in the ResultSet: " + list.toString()) + .isEmpty(); } @Override @@ -486,9 +485,9 @@ protected void verifyResultsDataPersistenceUponMissedNotify( for (com.datastax.driver.core.Row s : result) { list.remove(new Integer(s.getInt(TUPLE_COUNTER_FIELD))); } - Assert.assertTrue( - "The following ID's were not found in the ResultSet: " + list.toString(), - list.isEmpty()); + assertThat(list) + .as("The following ID's were not found in the ResultSet: " + list.toString()) + .isEmpty(); } @Override @@ -507,9 +506,9 @@ protected void verifyResultsDataDiscardingUponRestore( for (com.datastax.driver.core.Row s : result) { list.remove(new Integer(s.getInt(TUPLE_COUNTER_FIELD))); } - Assert.assertTrue( - "The following ID's were not found in the ResultSet: " + list.toString(), - list.isEmpty()); + assertThat(list) + .as("The following ID's were not found in the ResultSet: " + list.toString()) + .isEmpty(); } @Override @@ -536,7 +535,7 @@ protected void verifyResultsWhenReScaling( } Collections.sort(actual); - Assert.assertArrayEquals(expected.toArray(), actual.toArray()); + assertThat(actual.toArray()).isEqualTo(expected.toArray()); } @Test @@ -560,18 +559,18 @@ public void testCassandraCommitter() throws Exception { cc2.open(); cc3.open(); - Assert.assertFalse(cc1.isCheckpointCommitted(0, 1)); - Assert.assertFalse(cc2.isCheckpointCommitted(1, 1)); - Assert.assertFalse(cc3.isCheckpointCommitted(0, 1)); + assertThat(cc1.isCheckpointCommitted(0, 1)).isFalse(); + assertThat(cc2.isCheckpointCommitted(1, 1)).isFalse(); + assertThat(cc3.isCheckpointCommitted(0, 1)).isFalse(); cc1.commitCheckpoint(0, 1); - Assert.assertTrue(cc1.isCheckpointCommitted(0, 1)); + assertThat(cc1.isCheckpointCommitted(0, 1)).isTrue(); // verify that other sub-tasks aren't affected - Assert.assertFalse(cc2.isCheckpointCommitted(1, 1)); + assertThat(cc2.isCheckpointCommitted(1, 1)).isFalse(); // verify that other tasks aren't affected - Assert.assertFalse(cc3.isCheckpointCommitted(0, 1)); + assertThat(cc3.isCheckpointCommitted(0, 
1)).isFalse(); - Assert.assertFalse(cc1.isCheckpointCommitted(0, 2)); + assertThat(cc1.isCheckpointCommitted(0, 2)).isFalse(); cc1.close(); cc2.close(); @@ -585,8 +584,8 @@ public void testCassandraCommitter() throws Exception { // verify that checkpoint data is not destroyed within open/close and not reliant on // internally cached data - Assert.assertTrue(cc1.isCheckpointCommitted(0, 1)); - Assert.assertFalse(cc1.isCheckpointCommitted(0, 2)); + assertThat(cc1.isCheckpointCommitted(0, 1)).isTrue(); + assertThat(cc1.isCheckpointCommitted(0, 2)).isFalse(); cc1.close(); } @@ -609,7 +608,7 @@ public void testCassandraTupleAtLeastOnceSink() throws Exception { } ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); - Assert.assertEquals(20, rs.all().size()); + assertThat(rs.all()).hasSize(20); } @Test @@ -627,7 +626,7 @@ public void testCassandraRowAtLeastOnceSink() throws Exception { } ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); - Assert.assertEquals(20, rs.all().size()); + assertThat(rs.all()).hasSize(20); } @Test @@ -637,7 +636,7 @@ public void testCassandraPojoAtLeastOnceSink() throws Exception { writePojos(annotatedPojoClass, null); ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); - Assert.assertEquals(20, rs.all().size()); + assertThat(rs.all()).hasSize(20); } @Test @@ -646,7 +645,7 @@ public void testCassandraPojoNoAnnotatedKeyspaceAtLeastOnceSink() throws Excepti annotatePojoWithTable("", TABLE_NAME_PREFIX + tableID); writePojos(annotatedPojoClass, KEYSPACE); ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); - Assert.assertEquals(20, rs.all().size()); + assertThat(rs.all()).hasSize(20); } private void writePojos(Class annotatedPojoClass, @Nullable String keyspace) @@ -696,12 +695,11 @@ builderForWriting, injectTableName(INSERT_DATA_QUERY)) cmp.setField(0, o.getString(0)); cmp.setField(1, o.getInt(2)); cmp.setField(2, o.getInt(1)); - Assert.assertTrue( - "Row " + cmp + " was written to Cassandra but not in input.", - input.remove(cmp)); + assertThat(input.remove(cmp)) + .as("Row " + cmp + " was written to Cassandra but not in input.") + .isTrue(); } - Assert.assertTrue( - "The input data was not completely written to Cassandra", input.isEmpty()); + assertThat(input).as("The input data was not completely written to Cassandra").isEmpty(); } private static int retrialsCount = 0; @@ -723,11 +721,11 @@ public void testCassandraBatchPojoFormat() throws Exception { final List pojos = writePojosWithOutputFormat(annotatedPojoClass); ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); - Assert.assertEquals(20, rs.all().size()); + assertThat(rs.all()).hasSize(20); final List result = readPojosWithInputFormat(annotatedPojoClass); - Assert.assertEquals(20, result.size()); - assertThat(result, samePropertyValuesAs(pojos)); + assertThat(result).hasSize(20); + assertThat(result).satisfies(matching(samePropertyValuesAs(pojos))); } @Test @@ -770,7 +768,7 @@ public void testCassandraBatchTupleFormat() throws Exception { source.close(); } - Assert.assertEquals(20, result.size()); + assertThat(result).hasSize(20); } @Test @@ -790,7 +788,7 @@ public void testCassandraBatchRowFormat() throws Exception { ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); List rows = rs.all(); - Assert.assertEquals(rowCollection.size(), rows.size()); + assertThat(rows).hasSize(rowCollection.size()); } @Test @@ -820,7 +818,7 @@ public TypeSerializer> createSerializer( CassandraSink.CassandraSinkBuilder> sinkBuilder = 
CassandraSink.addSink(input); - assertTrue(sinkBuilder instanceof CassandraSink.CassandraScalaProductSinkBuilder); + assertThat(sinkBuilder).isInstanceOf(CassandraSink.CassandraScalaProductSinkBuilder.class); } @Test @@ -844,7 +842,7 @@ public void testCassandraScalaTupleAtLeastSink() throws Exception { ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); List rows = rs.all(); - Assert.assertEquals(scalaTupleCollection.size(), rows.size()); + assertThat(rows).hasSize(scalaTupleCollection.size()); for (com.datastax.driver.core.Row row : rows) { scalaTupleCollection.remove( @@ -853,7 +851,7 @@ public void testCassandraScalaTupleAtLeastSink() throws Exception { row.getInt(TUPLE_COUNTER_FIELD), row.getInt(TUPLE_BATCHID_FIELD))); } - Assert.assertEquals(0, scalaTupleCollection.size()); + assertThat(scalaTupleCollection).isEmpty(); } @Test @@ -884,15 +882,15 @@ public void testCassandraScalaTuplePartialColumnUpdate() throws Exception { ResultSet rs = session.execute(injectTableName(SELECT_DATA_QUERY)); List rows = rs.all(); - Assert.assertEquals(1, rows.size()); + assertThat(rows).hasSize(1); // Since nulls are ignored, we should be reading one complete record for (com.datastax.driver.core.Row row : rows) { - Assert.assertEquals( - new scala.Tuple3<>(id, counter, batchId), - new scala.Tuple3<>( - row.getString(TUPLE_ID_FIELD), - row.getInt(TUPLE_COUNTER_FIELD), - row.getInt(TUPLE_BATCHID_FIELD))); + assertThat( + new scala.Tuple3<>( + row.getString(TUPLE_ID_FIELD), + row.getInt(TUPLE_COUNTER_FIELD), + row.getInt(TUPLE_BATCHID_FIELD))) + .isEqualTo(new scala.Tuple3<>(id, counter, batchId)); } } } diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBaseTest.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBaseTest.java index 4a7b11a9cac7c..1cd7c7eb169a7 100644 --- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBaseTest.java +++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraSinkBaseTest.java @@ -30,7 +30,6 @@ import com.datastax.driver.core.Session; import com.datastax.driver.core.exceptions.NoHostAvailableException; import com.google.common.util.concurrent.ListenableFuture; -import org.junit.Assert; import org.junit.Test; import java.io.IOException; @@ -42,10 +41,9 @@ import java.util.concurrent.TimeoutException; import java.util.function.Function; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.number.OrderingComparison.greaterThan; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.powermock.api.mockito.PowerMockito.when; @@ -84,13 +82,13 @@ public void testSuccessfulPath() throws Exception { casSinkFunc.enqueueCompletableFuture(CompletableFuture.completedFuture(null)); final int originalPermits = casSinkFunc.getAvailablePermits(); - assertThat(originalPermits, greaterThan(0)); - Assert.assertEquals(0, casSinkFunc.getAcquiredPermits()); + assertThat(originalPermits).isGreaterThan(0); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(0); casSinkFunc.invoke("hello"); - 
Assert.assertEquals(originalPermits, casSinkFunc.getAvailablePermits()); - Assert.assertEquals(0, casSinkFunc.getAcquiredPermits()); + assertThat(casSinkFunc.getAvailablePermits()).isEqualTo(originalPermits); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(0); } } @@ -106,7 +104,7 @@ public void testThrowErrorOnClose() throws Exception { try { casSinkFunc.close(); - Assert.fail("Close should have thrown an exception."); + fail("Close should have thrown an exception."); } catch (IOException e) { ExceptionUtils.findThrowable(e, candidate -> candidate == cause).orElseThrow(() -> e); } @@ -122,10 +120,10 @@ public void testThrowErrorOnInvoke() throws Exception { try { casSinkFunc.invoke("world"); - Assert.fail("Sending of second value should have failed."); + fail("Sending of second value should have failed."); } catch (IOException e) { - Assert.assertEquals(cause, e.getCause()); - Assert.assertEquals(0, casSinkFunc.getAcquiredPermits()); + assertThat(e.getCause()).isEqualTo(cause); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(0); } } } @@ -133,7 +131,7 @@ public void testThrowErrorOnInvoke() throws Exception { @Test(timeout = DEFAULT_TEST_TIMEOUT) public void testIgnoreError() throws Exception { Exception cause = new RuntimeException(); - CassandraFailureHandler failureHandler = failure -> Assert.assertEquals(cause, failure); + CassandraFailureHandler failureHandler = failure -> assertThat(failure).isEqualTo(cause); try (TestCassandraSink casSinkFunc = createOpenedTestCassandraSink(failureHandler)) { @@ -155,14 +153,8 @@ public void testThrowErrorOnSnapshot() throws Exception { casSinkFunc.enqueueCompletableFuture(FutureUtils.completedExceptionally(cause)); casSinkFunc.invoke("hello"); - - try { - testHarness.snapshot(123L, 123L); - - Assert.fail(); - } catch (Exception e) { - Assert.assertTrue(e.getCause() instanceof IOException); - } + assertThatThrownBy(() -> testHarness.snapshot(123L, 123L)) + .hasCauseInstanceOf(IOException.class); } } @@ -176,7 +168,7 @@ public void testWaitForPendingUpdatesOnSnapshot() throws Exception { casSinkFunc.enqueueCompletableFuture(completableFuture); casSinkFunc.invoke("hello"); - Assert.assertEquals(1, casSinkFunc.getAcquiredPermits()); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(1); final CountDownLatch latch = new CountDownLatch(1); Thread t = @@ -192,10 +184,10 @@ public void go() throws Exception { Thread.sleep(5); } - Assert.assertEquals(1, casSinkFunc.getAcquiredPermits()); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(1); completableFuture.complete(null); latch.await(); - Assert.assertEquals(0, casSinkFunc.getAcquiredPermits()); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(0); } } @@ -210,7 +202,7 @@ public void testWaitForPendingUpdatesOnClose() throws Exception { casSinkFunc.enqueueCompletableFuture(completableFuture); casSinkFunc.invoke("hello"); - Assert.assertEquals(1, casSinkFunc.getAcquiredPermits()); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(1); final CountDownLatch latch = new CountDownLatch(1); Thread t = @@ -226,10 +218,10 @@ public void go() throws Exception { Thread.sleep(5); } - Assert.assertEquals(1, casSinkFunc.getAcquiredPermits()); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(1); completableFuture.complete(null); latch.await(); - Assert.assertEquals(0, casSinkFunc.getAcquiredPermits()); + assertThat(casSinkFunc.getAcquiredPermits()).isEqualTo(0); } } @@ -239,20 +231,20 @@ public void testReleaseOnSuccess() throws Exception { 
CassandraSinkBaseConfig.newBuilder().setMaxConcurrentRequests(1).build(); try (TestCassandraSink testCassandraSink = createOpenedTestCassandraSink(config)) { - Assert.assertEquals(1, testCassandraSink.getAvailablePermits()); - Assert.assertEquals(0, testCassandraSink.getAcquiredPermits()); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(1); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(0); CompletableFuture completableFuture = new CompletableFuture<>(); testCassandraSink.enqueueCompletableFuture(completableFuture); testCassandraSink.invoke("N/A"); - Assert.assertEquals(0, testCassandraSink.getAvailablePermits()); - Assert.assertEquals(1, testCassandraSink.getAcquiredPermits()); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(0); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(1); completableFuture.complete(null); - Assert.assertEquals(1, testCassandraSink.getAvailablePermits()); - Assert.assertEquals(0, testCassandraSink.getAcquiredPermits()); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(1); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(0); } } @@ -264,20 +256,20 @@ public void testReleaseOnFailure() throws Exception { try (TestCassandraSink testCassandraSink = createOpenedTestCassandraSink(config, failureHandler)) { - Assert.assertEquals(1, testCassandraSink.getAvailablePermits()); - Assert.assertEquals(0, testCassandraSink.getAcquiredPermits()); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(1); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(0); CompletableFuture completableFuture = new CompletableFuture<>(); testCassandraSink.enqueueCompletableFuture(completableFuture); testCassandraSink.invoke("N/A"); - Assert.assertEquals(0, testCassandraSink.getAvailablePermits()); - Assert.assertEquals(1, testCassandraSink.getAcquiredPermits()); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(0); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(1); completableFuture.completeExceptionally(new RuntimeException()); - Assert.assertEquals(1, testCassandraSink.getAvailablePermits()); - Assert.assertEquals(0, testCassandraSink.getAcquiredPermits()); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(1); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(0); } } @@ -296,16 +288,16 @@ public void testReleaseOnThrowingSend() throws Exception { try (TestCassandraSink testCassandraSink = new MockCassandraSink(config, failingSendFunction)) { testCassandraSink.open(new Configuration()); - assertThat(testCassandraSink.getAvailablePermits(), is(1)); - assertThat(testCassandraSink.getAcquiredPermits(), is(0)); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(1); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(0); //noinspection OverlyBroadCatchBlock,NestedTryStatement try { testCassandraSink.invoke("none"); } catch (Throwable e) { - assertThat(e, instanceOf(Throwable.class)); - assertThat(testCassandraSink.getAvailablePermits(), is(1)); - assertThat(testCassandraSink.getAcquiredPermits(), is(0)); + assertThat(e).isInstanceOf(Throwable.class); + assertThat(testCassandraSink.getAvailablePermits()).isEqualTo(1); + assertThat(testCassandraSink.getAcquiredPermits()).isEqualTo(0); } } } @@ -326,9 +318,9 @@ public void testTimeoutExceptionOnInvoke() throws Exception { try { testCassandraSink.invoke("Invoke #2"); - Assert.fail("Sending value should have experienced a TimeoutException"); + fail("Sending value 
should have experienced a TimeoutException"); } catch (Exception e) { - Assert.assertTrue(e instanceof TimeoutException); + assertThat(e).isInstanceOf(TimeoutException.class); } finally { completableFuture.complete(null); } diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java index b8226972cd12c..e9c7c8868b3c4 100644 --- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java +++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraTupleWriteAheadSinkTest.java @@ -38,7 +38,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicReference; -import static org.junit.Assert.assertFalse; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; import static org.powermock.api.mockito.PowerMockito.doAnswer; @@ -140,7 +140,7 @@ public void run() { sink.open(); // we should leave the loop and return false since we've seen an exception - assertFalse(sink.sendValues(Collections.singleton(new Tuple0()), 1L, 0L)); + assertThat(sink.sendValues(Collections.singleton(new Tuple0()), 1L, 0L)).isFalse(); sink.close(); } From f68b0ea4ad511e8903743059612d25828c0fd31f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 6 May 2022 11:09:27 +0200 Subject: [PATCH 03/16] [FLINK-27185][connector] Convert connector-elasticsearch modules to assertj Co-authored-by: slinkydeveloper --- .../sink/ElasticsearchSinkBaseITCase.java | 10 +- .../ElasticsearchSinkBuilderBaseTest.java | 44 +-- .../sink/ElasticsearchWriterITCase.java | 19 +- .../elasticsearch/sink/TestClientBase.java | 12 +- .../ElasticsearchDynamicSinkBaseITCase.java | 12 +- ...asticsearchDynamicSinkFactoryBaseTest.java | 278 ++++++++---------- .../table/IndexGeneratorTest.java | 121 ++++---- .../elasticsearch/table/KeyExtractorTest.java | 15 +- .../ElasticsearchSinkBaseTest.java | 108 ++----- .../ElasticsearchSinkTestBase.java | 51 ++-- .../table/IndexGeneratorFactoryTest.java | 103 +++---- .../elasticsearch/table/KeyExtractorTest.java | 17 +- .../testutils/ElasticsearchResource.java | 7 +- .../testutils/SourceSinkDataTestKit.java | 5 +- .../Elasticsearch6DynamicSinkFactoryTest.java | 22 +- .../Elasticsearch7DynamicSinkFactoryTest.java | 20 +- 16 files changed, 373 insertions(+), 471 deletions(-) diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBaseITCase.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBaseITCase.java index af87158b11905..c140828e8fe71 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBaseITCase.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBaseITCase.java @@ -53,9 +53,7 @@ import java.util.UUID; import java.util.function.BiFunction; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static 
org.junit.jupiter.api.Assertions.assertSame; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link ElasticsearchSink}. */ @ExtendWith(TestLoggerExtension.class) @@ -114,9 +112,9 @@ void testWriteToElasticSearchWithDeliveryGuarantee(DeliveryGuarantee deliveryGua runTest(index, false, TestEmitter::jsonEmitter, deliveryGuarantee, null); } catch (IllegalStateException e) { failure = true; - assertSame(deliveryGuarantee, DeliveryGuarantee.EXACTLY_ONCE); + assertThat(deliveryGuarantee).isSameAs(DeliveryGuarantee.EXACTLY_ONCE); } finally { - assertEquals(failure, deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE); + assertThat(failure).isEqualTo(deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE); } } @@ -134,7 +132,7 @@ void testWriteJsonToElasticsearch( void testRecovery() throws Exception { final String index = "test-recovery-elasticsearch-sink"; runTest(index, true, TestEmitter::jsonEmitter, new FailingMapper()); - assertTrue(failed); + assertThat(failed).isTrue(); } private void runTest( diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBuilderBaseTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBuilderBaseTest.java index 4d1890fe78bcf..3fc2abcb357d2 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBuilderBaseTest.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchSinkBuilderBaseTest.java @@ -30,8 +30,8 @@ import java.util.stream.Stream; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; -import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for {@link ElasticsearchSinkBuilderBase}. 
*/ @ExtendWith(TestLoggerExtension.class) @@ -54,7 +54,7 @@ Stream testValidBuilders() { return DynamicTest.stream( validBuilders, ElasticsearchSinkBuilderBase::toString, - builder -> assertDoesNotThrow(builder::build)); + builder -> assertThatCode(builder::build).doesNotThrowAnyException()); } @Test @@ -65,36 +65,38 @@ void testDefaultDeliveryGuarantee() { @Test void testThrowIfExactlyOnceConfigured() { - assertThrows( - IllegalStateException.class, - () -> createMinimalBuilder().setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)); + assertThatThrownBy( + () -> + createMinimalBuilder() + .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)) + .isInstanceOf(IllegalStateException.class); } @Test void testThrowIfHostsNotSet() { - assertThrows( - NullPointerException.class, - () -> createEmptyBuilder().setEmitter((element, indexer, context) -> {}).build()); + assertThatThrownBy( + () -> + createEmptyBuilder() + .setEmitter((element, indexer, context) -> {}) + .build()) + .isInstanceOf(NullPointerException.class); } @Test void testThrowIfEmitterNotSet() { - assertThrows( - NullPointerException.class, - () -> createEmptyBuilder().setHosts(new HttpHost("localhost:3000")).build()); + assertThatThrownBy( + () -> createEmptyBuilder().setHosts(new HttpHost("localhost:3000")).build()) + .isInstanceOf(NullPointerException.class); } @Test void testThrowIfSetInvalidTimeouts() { - assertThrows( - IllegalStateException.class, - () -> createEmptyBuilder().setConnectionRequestTimeout(-1).build()); - assertThrows( - IllegalStateException.class, - () -> createEmptyBuilder().setConnectionTimeout(-1).build()); - assertThrows( - IllegalStateException.class, - () -> createEmptyBuilder().setSocketTimeout(-1).build()); + assertThatThrownBy(() -> createEmptyBuilder().setConnectionRequestTimeout(-1).build()) + .isInstanceOf(IllegalStateException.class); + assertThatThrownBy(() -> createEmptyBuilder().setConnectionTimeout(-1).build()) + .isInstanceOf(IllegalStateException.class); + assertThatThrownBy(() -> createEmptyBuilder().setSocketTimeout(-1).build()) + .isInstanceOf(IllegalStateException.class); } abstract B createEmptyBuilder(); diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchWriterITCase.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchWriterITCase.java index 3db12a400d09d..2874ab22b740a 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchWriterITCase.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/ElasticsearchWriterITCase.java @@ -69,10 +69,7 @@ import static org.apache.flink.connector.elasticsearch.sink.TestClientBase.DOCUMENT_TYPE; import static org.apache.flink.connector.elasticsearch.sink.TestClientBase.buildMessage; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.greaterThan; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link ElasticsearchWriter}. 
*/ @Testcontainers @@ -195,20 +192,20 @@ void testIncrementByteOutMetric() throws Exception { try (final ElasticsearchWriter> writer = createWriter(index, false, bulkProcessorConfig, metricGroup)) { final Counter numBytesOut = operatorIOMetricGroup.getNumBytesOutCounter(); - assertEquals(numBytesOut.getCount(), 0); + assertThat(numBytesOut.getCount()).isEqualTo(0); writer.write(Tuple2.of(1, buildMessage(1)), null); writer.write(Tuple2.of(2, buildMessage(2)), null); writer.blockingFlushAllActions(); long first = numBytesOut.getCount(); - assertTrue(first > 0); + assertThat(first).isGreaterThan(0); writer.write(Tuple2.of(1, buildMessage(1)), null); writer.write(Tuple2.of(2, buildMessage(2)), null); writer.blockingFlushAllActions(); - assertTrue(numBytesOut.getCount() > first); + assertThat(numBytesOut.getCount()).isGreaterThan(first); } } @@ -231,8 +228,8 @@ void testIncrementRecordsSendMetric() throws Exception { writer.blockingFlushAllActions(); - assertTrue(recordsSend.isPresent()); - assertEquals(recordsSend.get().getCount(), 3L); + assertThat(recordsSend).isPresent(); + assertThat(recordsSend.get().getCount()).isEqualTo(3L); } } @@ -252,8 +249,8 @@ void testCurrentSendTime() throws Exception { writer.blockingFlushAllActions(); - assertTrue(currentSendTime.isPresent()); - assertThat(currentSendTime.get().getValue(), greaterThan(0L)); + assertThat(currentSendTime).isPresent(); + assertThat(currentSendTime.get().getValue()).isGreaterThan(0L); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/TestClientBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/TestClientBase.java index 5d2f6f53b6be0..1dd70ce4ccae2 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/TestClientBase.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/sink/TestClientBase.java @@ -23,8 +23,7 @@ import java.io.IOException; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.assertj.core.api.Assertions.assertThat; abstract class TestClientBase { @@ -42,10 +41,11 @@ void assertThatIdsAreNotWritten(String index, int... ids) throws IOException { for (final int id : ids) { try { final GetResponse response = getResponse(index, id); - assertFalse( - response.isExists(), String.format("Id %s is unexpectedly present.", id)); + assertThat(response.isExists()) + .as(String.format("Id %s is unexpectedly present.", id)) + .isFalse(); } catch (ElasticsearchStatusException e) { - assertEquals(404, e.status().getStatus()); + assertThat(e.status().getStatus()).isEqualTo(404); } } } @@ -58,7 +58,7 @@ void assertThatIdsAreWritten(String index, int... 
ids) response = getResponse(index, id); Thread.sleep(10); } while (response.isSourceEmpty()); - assertEquals(buildMessage(id), response.getSource().get(DATA_FIELD_NAME)); + assertThat(response.getSource().get(DATA_FIELD_NAME)).isEqualTo(buildMessage(id)); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkBaseITCase.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkBaseITCase.java index 75dd9b98d9b23..c6d69d309bf5c 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkBaseITCase.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkBaseITCase.java @@ -44,7 +44,6 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHits; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -62,6 +61,7 @@ import java.util.Map; import static org.apache.flink.table.api.Expressions.row; +import static org.assertj.core.api.Assertions.assertThat; /** IT tests for {@link ElasticsearchDynamicSink}. */ @ExtendWith(TestLoggerExtension.class) @@ -142,7 +142,7 @@ public void testWritingDocuments() throws Exception { expectedMap.put("e", 2); expectedMap.put("f", "2003-10-20"); expectedMap.put("g", "2012-12-12 12:12:12"); - Assertions.assertEquals(response, expectedMap); + assertThat(response).isEqualTo(expectedMap); } @Test @@ -190,7 +190,7 @@ public void testWritingDocumentsFromTableApi() throws Exception { expectedMap.put("e", 2); expectedMap.put("f", "2003-10-20"); expectedMap.put("g", "2012-12-12 12:12:12"); - Assertions.assertEquals(response, expectedMap); + assertThat(response).isEqualTo(expectedMap); } @Test @@ -273,7 +273,7 @@ public void testWritingDocumentsNoPrimaryKey() throws Exception { HashSet> expectedSet = new HashSet<>(); expectedSet.add(expectedMap1); expectedSet.add(expectedMap2); - Assertions.assertEquals(resultSet, expectedSet); + assertThat(resultSet).isEqualTo(expectedSet); } @Test @@ -302,7 +302,7 @@ public void testWritingDocumentsWithDynamicIndex() throws Exception { Map expectedMap = new HashMap<>(); expectedMap.put("a", 1); expectedMap.put("b", "2012-12-12 12:12:12"); - Assertions.assertEquals(response, expectedMap); + assertThat(response).isEqualTo(expectedMap); } @Test @@ -353,6 +353,6 @@ public void testWritingDocumentsWithDynamicIndexFromSystemTime() throws Exceptio Map expectedMap = new HashMap<>(); expectedMap.put("a", 1); expectedMap.put("b", "2012-12-12 12:12:12"); - Assertions.assertEquals(response, expectedMap); + assertThat(response).isEqualTo(expectedMap); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkFactoryBaseTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkFactoryBaseTest.java index 8fa0a4986ea0e..8e5a98e7f3e1b 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkFactoryBaseTest.java +++ 
b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/ElasticsearchDynamicSinkFactoryBaseTest.java @@ -20,7 +20,6 @@ import org.apache.flink.api.common.typeutils.base.VoidSerializer; import org.apache.flink.connector.elasticsearch.ElasticsearchUtil; -import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.catalog.Column; import org.apache.flink.table.catalog.ResolvedSchema; @@ -31,16 +30,24 @@ import org.apache.flink.types.RowKind; import org.apache.flink.util.TestLoggerExtension; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; -import org.junit.jupiter.api.function.Executable; import java.util.Arrays; import java.util.Collections; +import static org.apache.flink.table.api.DataTypes.ARRAY; +import static org.apache.flink.table.api.DataTypes.BIGINT; +import static org.apache.flink.table.api.DataTypes.BYTES; +import static org.apache.flink.table.api.DataTypes.FIELD; +import static org.apache.flink.table.api.DataTypes.MAP; +import static org.apache.flink.table.api.DataTypes.MULTISET; +import static org.apache.flink.table.api.DataTypes.RAW; +import static org.apache.flink.table.api.DataTypes.ROW; +import static org.apache.flink.table.api.DataTypes.STRING; import static org.apache.flink.table.factories.FactoryUtil.SINK_PARALLELISM; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for validation in {@link ElasticsearchDynamicSinkFactoryBase}. */ @ExtendWith(TestLoggerExtension.class) @@ -50,190 +57,164 @@ abstract class ElasticsearchDynamicSinkFactoryBaseTest { abstract TestContext createPrefilledTestContext(); - void assertValidationException(String expectedMessage, Executable executable) { - ValidationException thrown = Assertions.assertThrows(ValidationException.class, executable); - Assertions.assertEquals(expectedMessage, thrown.getMessage()); - } - @Test public void validateWrongIndex() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "'index' must not be empty", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withOption( - ElasticsearchConnectorOptions.INDEX_OPTION.key(), - "") - .build())); + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withOption( + ElasticsearchConnectorOptions.INDEX_OPTION + .key(), + "") + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage("'index' must not be empty"); } @Test public void validateWrongHosts() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "Could not parse host 'wrong-host' in option 'hosts'. It should follow the format 'http://host_name:port'.", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withOption( - ElasticsearchConnectorOptions.HOSTS_OPTION.key(), - "wrong-host") - .build())); + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withOption( + ElasticsearchConnectorOptions.HOSTS_OPTION + .key(), + "wrong-host") + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage( + "Could not parse host 'wrong-host' in option 'hosts'. 
It should follow the format 'http://host_name:port'."); } @Test public void validateWrongFlushSize() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "'sink.bulk-flush.max-size' must be in MB granularity. Got: 1024 bytes", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withOption( - ElasticsearchConnectorOptions - .BULK_FLUSH_MAX_SIZE_OPTION - .key(), - "1kb") - .build())); + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withOption( + ElasticsearchConnectorOptions + .BULK_FLUSH_MAX_SIZE_OPTION + .key(), + "1kb") + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage( + "'sink.bulk-flush.max-size' must be in MB granularity. Got: 1024 bytes"); } @Test public void validateWrongRetries() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withOption( - ElasticsearchConnectorOptions - .BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION - .key(), - "0") - .build())); + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withOption( + ElasticsearchConnectorOptions + .BULK_FLUSH_BACKOFF_MAX_RETRIES_OPTION + .key(), + "0") + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage("'sink.bulk-flush.backoff.max-retries' must be at least 1. Got: 0"); } @Test public void validateWrongMaxActions() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "'sink.bulk-flush.max-actions' must be at least 1. Got: -2", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withOption( - ElasticsearchConnectorOptions - .BULK_FLUSH_MAX_ACTIONS_OPTION - .key(), - "-2") - .build())); + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withOption( + ElasticsearchConnectorOptions + .BULK_FLUSH_MAX_ACTIONS_OPTION + .key(), + "-2") + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage("'sink.bulk-flush.max-actions' must be at least 1. 
Got: -2"); } @Test public void validateWrongBackoffDelay() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "Invalid value for option 'sink.bulk-flush.backoff.delay'.", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withOption( - ElasticsearchConnectorOptions - .BULK_FLUSH_BACKOFF_DELAY_OPTION - .key(), - "-1s") - .build())); + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withOption( + ElasticsearchConnectorOptions + .BULK_FLUSH_BACKOFF_DELAY_OPTION + .key(), + "-1s") + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage("Invalid value for option 'sink.bulk-flush.backoff.delay'."); } @Test public void validatePrimaryKeyOnIllegalColumn() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "The table has a primary key on columns of illegal types: " - + "[ARRAY, MAP, MULTISET, ROW, RAW, VARBINARY].", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withSchema( - new ResolvedSchema( - Arrays.asList( - Column.physical( - "a", - DataTypes.BIGINT() - .notNull()), - Column.physical( - "b", - DataTypes.ARRAY( - DataTypes - .BIGINT() - .notNull()) - .notNull()), - Column.physical( - "c", - DataTypes.MAP( - DataTypes - .BIGINT(), - DataTypes - .STRING()) - .notNull()), - Column.physical( - "d", - DataTypes.MULTISET( - DataTypes - .BIGINT() - .notNull()) - .notNull()), - Column.physical( - "e", - DataTypes.ROW( - DataTypes - .FIELD( - "a", - DataTypes - .BIGINT())) - .notNull()), - Column.physical( - "f", - DataTypes.RAW( - Void.class, - VoidSerializer - .INSTANCE) - .notNull()), - Column.physical( - "g", - DataTypes.BYTES() - .notNull())), - Collections.emptyList(), - UniqueConstraint.primaryKey( - "name", - Arrays.asList( - "a", "b", "c", "d", "e", - "f", "g")))) - .build())); + ResolvedSchema resolvedSchema = + new ResolvedSchema( + Arrays.asList( + Column.physical("a", BIGINT().notNull()), + Column.physical("b", ARRAY(BIGINT().notNull()).notNull()), + Column.physical("c", MAP(BIGINT(), STRING()).notNull()), + Column.physical("d", MULTISET(BIGINT().notNull()).notNull()), + Column.physical("e", ROW(FIELD("a", BIGINT())).notNull()), + Column.physical( + "f", RAW(Void.class, VoidSerializer.INSTANCE).notNull()), + Column.physical("g", BYTES().notNull())), + Collections.emptyList(), + UniqueConstraint.primaryKey( + "name", Arrays.asList("a", "b", "c", "d", "e", "f", "g"))); + + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withSchema(resolvedSchema) + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage( + "The table has a primary key on columns of illegal types: " + + "[ARRAY, MAP, MULTISET, ROW, RAW, VARBINARY]."); } @Test public void validateWrongCredential() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "'username' and 'password' must be set at the same time. 
Got: username 'username' and password ''", - () -> - sinkFactory.createDynamicTableSink( - createPrefilledTestContext() - .withOption( - ElasticsearchConnectorOptions.USERNAME_OPTION.key(), - "username") - .withOption( - ElasticsearchConnectorOptions.PASSWORD_OPTION.key(), - "") - .build())); + assertThatThrownBy( + () -> + sinkFactory.createDynamicTableSink( + createPrefilledTestContext() + .withOption( + ElasticsearchConnectorOptions + .USERNAME_OPTION + .key(), + "username") + .withOption( + ElasticsearchConnectorOptions + .PASSWORD_OPTION + .key(), + "") + .build())) + .isInstanceOf(ValidationException.class) + .hasMessage( + "'username' and 'password' must be set at the same time. Got: username 'username' and password ''"); } @Test @@ -252,9 +233,10 @@ public void validateDynamicIndexOnChangelogStream() { .addContainedKind(RowKind.DELETE) .addContainedKind(RowKind.INSERT) .build(); - assertValidationException( - "Dynamic indexing based on system time only works on append only stream.", - () -> sink.getChangelogMode(changelogMode)); + assertThatThrownBy(() -> sink.getChangelogMode(changelogMode)) + .isInstanceOf(ValidationException.class) + .hasMessage( + "Dynamic indexing based on system time only works on append only stream."); } @Test @@ -269,6 +251,6 @@ public void testSinkParallelism() { ElasticsearchDynamicSink esSink = (ElasticsearchDynamicSink) sink; SinkV2Provider provider = (SinkV2Provider) esSink.getSinkRuntimeProvider(new ElasticsearchUtil.MockContext()); - assertThat(2).isEqualTo(provider.getParallelism().get()); + assertThat(provider.getParallelism()).hasValue(2); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/IndexGeneratorTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/IndexGeneratorTest.java index 8760d0d4a4372..307b0425f1381 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/IndexGeneratorTest.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/IndexGeneratorTest.java @@ -26,7 +26,6 @@ import org.apache.flink.table.data.TimestampData; import org.apache.flink.table.types.DataType; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.sql.Date; @@ -37,10 +36,11 @@ import java.time.LocalTime; import java.time.ZoneId; import java.time.format.DateTimeFormatter; -import java.time.temporal.UnsupportedTemporalTypeException; import java.util.Arrays; import java.util.List; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.jupiter.api.Assumptions.assumingThat; /** Suite tests for {@link IndexGenerator}. 
*/ @@ -128,10 +128,10 @@ public void testDynamicIndexFromTimestampTzUTC() { fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals( - "2020_03_17_19-14_index", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals( - "2020_03_20_03-14_index", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))) + .isEqualTo("2020_03_17_19-14_index"); + assertThat(indexGenerator.generate(rows.get(1))) + .isEqualTo("2020_03_20_03-14_index"); }); } @@ -144,8 +144,8 @@ public void testDynamicIndexFromTimestampTzWithSpecificTimezone() { dataTypes, ZoneId.of("Europe/Berlin")); indexGenerator.open(); - Assertions.assertEquals("2020_03_17_20-14_index", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("2020_03_20_04-14_index", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_17_20-14_index"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("2020_03_20_04-14_index"); } @Test @@ -154,12 +154,12 @@ public void testDynamicIndexFromTimestamp() { IndexGeneratorFactory.createIndexGenerator( "{order_timestamp|yyyy_MM_dd_HH-ss}_index", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("2020_03_18_12-14_index", indexGenerator.generate(rows.get(0))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index"); IndexGenerator indexGenerator1 = IndexGeneratorFactory.createIndexGenerator( "{order_timestamp|yyyy_MM_dd_HH_mm}_index", fieldNames, dataTypes); indexGenerator1.open(); - Assertions.assertEquals("2020_03_19_12_22_index", indexGenerator1.generate(rows.get(1))); + assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index"); } @Test @@ -168,12 +168,12 @@ public void testDynamicIndexFromLocalDateTime() { IndexGeneratorFactory.createIndexGenerator( "{local_datetime|yyyy_MM_dd_HH-ss}_index", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("2020_03_18_12-14_index", indexGenerator.generate(rows.get(0))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index"); IndexGenerator indexGenerator1 = IndexGeneratorFactory.createIndexGenerator( "{local_datetime|yyyy_MM_dd_HH_mm}_index", fieldNames, dataTypes); indexGenerator1.open(); - Assertions.assertEquals("2020_03_19_12_22_index", indexGenerator1.generate(rows.get(1))); + assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index"); } @Test @@ -182,8 +182,8 @@ public void testDynamicIndexFromDate() { IndexGeneratorFactory.createIndexGenerator( "my-index-{log_date|yyyy/MM/dd}", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("my-index-2020/03/18", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("my-index-2020/03/19", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19"); } @Test @@ -192,8 +192,8 @@ public void testDynamicIndexFromLocalDate() { IndexGeneratorFactory.createIndexGenerator( "my-index-{local_date|yyyy/MM/dd}", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("my-index-2020/03/18", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("my-index-2020/03/19", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18"); + 
assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19"); } @Test @@ -202,8 +202,8 @@ public void testDynamicIndexFromTime() { IndexGeneratorFactory.createIndexGenerator( "my-index-{log_time|HH-mm}", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("my-index-12-12", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("my-index-12-22", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-12"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-22"); } @Test @@ -212,8 +212,8 @@ public void testDynamicIndexFromLocalTime() { IndexGeneratorFactory.createIndexGenerator( "my-index-{local_time|HH-mm}", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("my-index-12-13", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("my-index-12-13", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-13"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-13"); } @Test @@ -222,8 +222,8 @@ public void testDynamicIndexDefaultFormat() { IndexGeneratorFactory.createIndexGenerator( "my-index-{local_time|}", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("my-index-12_13_14", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("my-index-12_13_14", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12_13_14"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12_13_14"); } @Test @@ -259,9 +259,10 @@ public void testDynamicIndexFromSystemTime() { String actualIndex = indexGenerator.generate(rows.get(1)); String expectedIndex2 = "my-index-" + LocalDateTime.now().format(dateTimeFormatter); - Assertions.assertTrue( - actualIndex.equals(expectedIndex1) - || actualIndex.equals(expectedIndex2)); + assertThat( + actualIndex.equals(expectedIndex1) + || actualIndex.equals(expectedIndex2)) + .isTrue(); }); List invalidUseCases = @@ -289,7 +290,7 @@ public void testDynamicIndexFromSystemTime() { dataTypes); indexGenerator.open(); } catch (TableException e) { - Assertions.assertEquals(expectedExceptionMsg, e.getMessage()); + assertThat(e).hasMessage(expectedExceptionMsg); } }); } @@ -299,8 +300,8 @@ public void testGeneralDynamicIndex() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("index_{item}", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("index_apple", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("index_peanut", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("index_apple"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("index_peanut"); } @Test @@ -308,8 +309,8 @@ public void testStaticIndex() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("my-index", fieldNames, dataTypes); indexGenerator.open(); - Assertions.assertEquals("my-index", indexGenerator.generate(rows.get(0))); - Assertions.assertEquals("my-index", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index"); } @Test @@ -317,12 +318,12 @@ public void testUnknownField() { String expectedExceptionMsg = "Unknown field 'unknown_ts' in index pattern 'my-index-{unknown_ts|yyyy-MM-dd}'," + " please 
check the field name."; - try { - IndexGeneratorFactory.createIndexGenerator( - "my-index-{unknown_ts|yyyy-MM-dd}", fieldNames, dataTypes); - } catch (TableException e) { - Assertions.assertEquals(e.getMessage(), expectedExceptionMsg); - } + assertThatThrownBy( + () -> + IndexGeneratorFactory.createIndexGenerator( + "my-index-{unknown_ts|yyyy-MM-dd}", fieldNames, dataTypes)) + .isInstanceOf(TableException.class) + .hasMessage(expectedExceptionMsg); } @Test @@ -330,12 +331,12 @@ public void testUnsupportedTimeType() { String expectedExceptionMsg = "Unsupported type 'INT' found in Elasticsearch dynamic index field, " + "time-related pattern only support types are: DATE,TIME,TIMESTAMP."; - try { - IndexGeneratorFactory.createIndexGenerator( - "my-index-{id|yyyy-MM-dd}", fieldNames, dataTypes); - } catch (TableException e) { - Assertions.assertEquals(expectedExceptionMsg, e.getMessage()); - } + assertThatThrownBy( + () -> + IndexGeneratorFactory.createIndexGenerator( + "my-index-{id|yyyy-MM-dd}", fieldNames, dataTypes)) + .isInstanceOf(TableException.class) + .hasMessage(expectedExceptionMsg); } @Test @@ -343,23 +344,14 @@ public void testUnsupportedMultiParametersType() { String expectedExceptionMsg = "Chaining dynamic index pattern my-index-{local_date}-{local_time} is not supported," + " only support single dynamic index pattern."; - try { - IndexGeneratorFactory.createIndexGenerator( - "my-index-{local_date}-{local_time}", fieldNames, dataTypes); - } catch (TableException e) { - Assertions.assertEquals(expectedExceptionMsg, e.getMessage()); - } - } - - @Test - public void testDynamicIndexUnsupportedFormat() { - String expectedExceptionMsg = "Unsupported field: HourOfDay"; - try { - IndexGeneratorFactory.createIndexGenerator( - "my-index-{local_date|yyyy/MM/dd HH:mm}", fieldNames, dataTypes); - } catch (UnsupportedTemporalTypeException e) { - Assertions.assertEquals(expectedExceptionMsg, e.getMessage()); - } + assertThatThrownBy( + () -> + IndexGeneratorFactory.createIndexGenerator( + "my-index-{local_date}-{local_time}", + fieldNames, + dataTypes)) + .isInstanceOf(TableException.class) + .hasMessage(expectedExceptionMsg); } @Test @@ -368,10 +360,11 @@ public void testUnsupportedIndexFieldType() { "Unsupported type BOOLEAN of index field, Supported types are:" + " [DATE, TIME_WITHOUT_TIME_ZONE, TIMESTAMP_WITHOUT_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE," + " TIMESTAMP_WITH_LOCAL_TIME_ZONE, VARCHAR, CHAR, TINYINT, INTEGER, BIGINT]"; - try { - IndexGeneratorFactory.createIndexGenerator("index_{status}", fieldNames, dataTypes); - } catch (IllegalArgumentException e) { - Assertions.assertEquals(expectedExceptionMsg, e.getMessage()); - } + assertThatThrownBy( + () -> + IndexGeneratorFactory.createIndexGenerator( + "index_{status}", fieldNames, dataTypes)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage(expectedExceptionMsg); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/KeyExtractorTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/KeyExtractorTest.java index e2110ca707b2e..29f7502538d87 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/KeyExtractorTest.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/connector/elasticsearch/table/KeyExtractorTest.java @@ -24,7 +24,6 @@ import 
org.apache.flink.table.data.StringData; import org.apache.flink.table.data.TimestampData; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.time.Instant; @@ -37,6 +36,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.assertj.core.api.Assertions.assertThat; + /** Tests for {@link KeyExtractor}. */ public class KeyExtractorTest { @Test @@ -51,7 +52,7 @@ public void testSimpleKey() { KeyExtractor.createKeyExtractor(logicalTypesWithIndex, "_"); String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD"))); - Assertions.assertEquals(key, "12"); + assertThat(key).isEqualTo("12"); } @Test @@ -62,7 +63,7 @@ public void testNoPrimaryKey() { KeyExtractor.createKeyExtractor(logicalTypesWithIndex, "_"); String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD"))); - Assertions.assertEquals(key, null); + assertThat(key).isNull(); } @Test @@ -85,7 +86,7 @@ public void testTwoFieldsKey() { StringData.fromString("ABCD"), TimestampData.fromLocalDateTime( LocalDateTime.parse("2012-12-12T12:12:12")))); - Assertions.assertEquals(key, "12_2012-12-12T12:12:12"); + assertThat(key).isEqualTo("12_2012-12-12T12:12:12"); } @Test @@ -140,8 +141,8 @@ public void testAllTypesKey() { TimestampData.fromInstant(Instant.parse("2013-01-13T13:13:13Z")), (int) (LocalTime.parse("14:14:14").toNanoOfDay() / 1_000_000), (int) LocalDate.parse("2015-05-15").toEpochDay())); - Assertions.assertEquals( - key, - "1_2_3_4_true_1.0_2.0_ABCD_2012-12-12T12:12:12_2013-01-13T13:13:13_14:14:14_2015-05-15"); + assertThat(key) + .isEqualTo( + "1_2_3_4_true_1.0_2.0_ABCD_2012-12-12T12:12:12_2013-01-13T13:13:13_14:14:14_2015-05-15"); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java index 98c409300885b..33675d1bc01b9 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkBaseTest.java @@ -36,7 +36,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; -import org.junit.Assert; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -50,6 +49,8 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicLong; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -104,17 +105,9 @@ public void testItemFailureRethrownOnInvoke() throws Throwable { // manually execute the next bulk request sink.manualBulkRequestWithAllPendingRequests(); - try { - testHarness.processElement(new StreamRecord<>("next msg")); - } catch (Exception e) { - // the invoke should have failed with the failure - Assert.assertTrue(e.getCause().getMessage().contains("artificial failure for record")); - - // test succeeded - return; - } - - Assert.fail(); + assertThatThrownBy(() -> testHarness.processElement(new 
StreamRecord<>("next msg"))) + .getCause() + .hasMessageContaining("artificial failure for record"); } /** @@ -143,18 +136,10 @@ public void testItemFailureRethrownOnCheckpoint() throws Throwable { // manually execute the next bulk request sink.manualBulkRequestWithAllPendingRequests(); - try { - testHarness.snapshot(1L, 1000L); - } catch (Exception e) { - // the snapshot should have failed with the failure - Assert.assertTrue( - e.getCause().getCause().getMessage().contains("artificial failure for record")); - - // test succeeded - return; - } - - Assert.fail(); + assertThatThrownBy(() -> testHarness.snapshot(1L, 1000L)) + .getCause() + .getCause() + .hasMessageContaining("artificial failure for record"); } /** @@ -213,18 +198,10 @@ public void go() throws Exception { // fail) sink.continueFlush(); - try { - snapshotThread.sync(); - } catch (Exception e) { - // the snapshot should have failed with the failure from the 2nd request - Assert.assertTrue( - e.getCause().getCause().getMessage().contains("artificial failure for record")); - - // test succeeded - return; - } - - Assert.fail(); + assertThatThrownBy(snapshotThread::sync) + .getCause() + .getCause() + .hasMessageContaining("artificial failure for record"); } /** @@ -252,18 +229,9 @@ public void testBulkFailureRethrownOnInvoke() throws Throwable { // manually execute the next bulk request sink.manualBulkRequestWithAllPendingRequests(); - try { - testHarness.processElement(new StreamRecord<>("next msg")); - } catch (Exception e) { - // the invoke should have failed with the bulk request failure - Assert.assertTrue( - e.getCause().getMessage().contains("artificial failure for bulk request")); - - // test succeeded - return; - } - - Assert.fail(); + assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>("next msg"))) + .getCause() + .hasMessageContaining("artificial failure for bulk request"); } /** @@ -291,21 +259,10 @@ public void testBulkFailureRethrownOnCheckpoint() throws Throwable { // manually execute the next bulk request sink.manualBulkRequestWithAllPendingRequests(); - try { - testHarness.snapshot(1L, 1000L); - } catch (Exception e) { - // the snapshot should have failed with the bulk request failure - Assert.assertTrue( - e.getCause() - .getCause() - .getMessage() - .contains("artificial failure for bulk request")); - - // test succeeded - return; - } - - Assert.fail(); + assertThatThrownBy(() -> testHarness.snapshot(1L, 1000L)) + .getCause() + .getCause() + .hasMessageContaining("artificial failure for bulk request"); } /** @@ -360,21 +317,10 @@ public void go() throws Exception { // let the snapshot-triggered flush continue (bulk request should fail completely) sink.continueFlush(); - try { - snapshotThread.sync(); - } catch (Exception e) { - // the snapshot should have failed with the bulk request failure - Assert.assertTrue( - e.getCause() - .getCause() - .getMessage() - .contains("artificial failure for bulk request")); - - // test succeeded - return; - } - - Assert.fail(); + assertThatThrownBy(snapshotThread::sync) + .getCause() + .getCause() + .hasMessageContaining("artificial failure for bulk request"); } /** @@ -427,7 +373,7 @@ public void go() throws Exception { } // current number of pending request should be 1 due to the re-add - Assert.assertEquals(1, sink.getNumPendingRequests()); + assertThat(sink.getNumPendingRequests()).isEqualTo(1); // this time, let the bulk request succeed, so no-more requests are re-added sink.setMockItemFailuresListForNextBulkItemResponses( @@ -482,8 +428,8 @@ public void 
testOpenAndCloseInSinkFunction() throws Exception { sink.open(mock(Configuration.class)); sink.close(); - Assert.assertTrue(sinkFunction.openCalled); - Assert.assertTrue(sinkFunction.closeCalled); + assertThat(sinkFunction.openCalled).isTrue(); + assertThat(sinkFunction.closeCalled).isTrue(); } private static class DummyElasticsearchSink extends ElasticsearchSinkBase { diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java index 84342fc6fed22..5c3d4ec386319 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/ElasticsearchSinkTestBase.java @@ -32,7 +32,7 @@ import java.util.Map; import java.util.function.Function; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** * Environment preparation and suite of tests for version-specific {@link ElasticsearchSinkBase} @@ -100,33 +100,28 @@ private void runElasticSearchSinkTest( * null}. */ public void runNullAddressesTest() { - try { - createElasticsearchSink( - 1, getClusterName(), null, SourceSinkDataTestKit.getJsonSinkFunction("test")); - } catch (IllegalArgumentException | NullPointerException expectedException) { - // test passes - return; - } - - fail(); + assertThatThrownBy( + () -> + createElasticsearchSink( + 1, + getClusterName(), + null, + SourceSinkDataTestKit.getJsonSinkFunction("test"))) + .isInstanceOfAny(IllegalArgumentException.class, NullPointerException.class); } /** * Tests that the Elasticsearch sink fails eagerly if the provided list of addresses is empty. */ public void runEmptyAddressesTest() { - try { - createElasticsearchSink( - 1, - getClusterName(), - Collections.emptyList(), - SourceSinkDataTestKit.getJsonSinkFunction("test")); - } catch (IllegalArgumentException expectedException) { - // test passes - return; - } - - fail(); + assertThatThrownBy( + () -> + createElasticsearchSink( + 1, + getClusterName(), + Collections.emptyList(), + SourceSinkDataTestKit.getJsonSinkFunction("test"))) + .isInstanceOf(IllegalArgumentException.class); } /** Tests whether the Elasticsearch sink fails when there is no cluster to connect to. */ @@ -143,16 +138,8 @@ public void runInvalidElasticsearchClusterTest() throws Exception { SourceSinkDataTestKit.getJsonSinkFunction("test"), "123.123.123.123")); // incorrect ip address - try { - env.execute("Elasticsearch Sink Test"); - } catch (JobExecutionException expectedException) { - // every ES version throws a different exception in case of timeouts, so don't bother - // asserting on the exception - // test passes - return; - } - - fail(); + assertThatThrownBy(() -> env.execute("Elasticsearch Sink Test")) + .isInstanceOf(JobExecutionException.class); } /** Utility method to create a user config map. 
*/ diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGeneratorFactoryTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGeneratorFactoryTest.java index bbb5081b73389..5b90d15158f81 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGeneratorFactoryTest.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/IndexGeneratorFactoryTest.java @@ -27,7 +27,6 @@ import org.apache.flink.table.data.TimestampData; import org.apache.flink.util.TestLogger; -import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -37,11 +36,12 @@ import java.time.LocalTime; import java.time.ZoneId; import java.time.format.DateTimeFormatter; -import java.time.temporal.UnsupportedTemporalTypeException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assume.assumeThat; @@ -100,12 +100,12 @@ public void testDynamicIndexFromTimestamp() { IndexGeneratorFactory.createIndexGenerator( "{order_timestamp|yyyy_MM_dd_HH-ss}_index", schema); indexGenerator.open(); - Assert.assertEquals("2020_03_18_12-14_index", indexGenerator.generate(rows.get(0))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("2020_03_18_12-14_index"); IndexGenerator indexGenerator1 = IndexGeneratorFactory.createIndexGenerator( "{order_timestamp|yyyy_MM_dd_HH_mm}_index", schema); indexGenerator1.open(); - Assert.assertEquals("2020_03_19_12_22_index", indexGenerator1.generate(rows.get(1))); + assertThat(indexGenerator1.generate(rows.get(1))).isEqualTo("2020_03_19_12_22_index"); } @Test @@ -114,8 +114,8 @@ public void testDynamicIndexFromDate() { IndexGeneratorFactory.createIndexGenerator( "my-index-{log_date|yyyy/MM/dd}", schema); indexGenerator.open(); - Assert.assertEquals("my-index-2020/03/18", indexGenerator.generate(rows.get(0))); - Assert.assertEquals("my-index-2020/03/19", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020/03/18"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020/03/19"); } @Test @@ -123,8 +123,8 @@ public void testDynamicIndexFromTime() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("my-index-{log_time|HH-mm}", schema); indexGenerator.open(); - Assert.assertEquals("my-index-12-12", indexGenerator.generate(rows.get(0))); - Assert.assertEquals("my-index-12-22", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12-12"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12-22"); } @Test @@ -132,8 +132,8 @@ public void testDynamicIndexDefaultFormat() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("my-index-{log_time|}", schema); indexGenerator.open(); - Assert.assertEquals("my-index-12_12_14", indexGenerator.generate(rows.get(0))); - Assert.assertEquals("my-index-12_22_21", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-12_12_14"); + 
assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-12_22_21"); } @Test @@ -167,9 +167,10 @@ public void testDynamicIndexFromSystemTime() { String actualIndex = indexGenerator.generate(rows.get(1)); String expectedIndex2 = "my-index-" + LocalDateTime.now().format(dateTimeFormatter); - Assert.assertTrue( - actualIndex.equals(expectedIndex1) - || actualIndex.equals(expectedIndex2)); + assertThat( + actualIndex.equals(expectedIndex1) + || actualIndex.equals(expectedIndex2)) + .isTrue(); }); List invalidUseCases = @@ -196,7 +197,7 @@ public void testDynamicIndexFromSystemTime() { schema); indexGenerator.open(); } catch (TableException e) { - Assert.assertEquals(expectedExceptionMsg, e.getMessage()); + assertThat(e).hasMessage(expectedExceptionMsg); } }); } @@ -208,8 +209,8 @@ public void testDynamicIndexDefaultFormatTimestampWithLocalTimeZoneUTC() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("my-index-{local_timestamp|}", schema); indexGenerator.open(); - Assert.assertEquals("my-index-2020_03_17_19_12_14Z", indexGenerator.generate(rows.get(0))); - Assert.assertEquals("my-index-2020_03_20_03_22_14Z", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index-2020_03_17_19_12_14Z"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index-2020_03_20_03_22_14Z"); } @Test @@ -218,10 +219,10 @@ public void testDynamicIndexDefaultFormatTimestampWithLocalTimeZoneWithSpecificT IndexGeneratorFactory.createIndexGenerator( "my-index-{local_timestamp|}", schema, ZoneId.of("Europe/Berlin")); indexGenerator.open(); - Assert.assertEquals( - "my-index-2020_03_17_20_12_14+01", indexGenerator.generate(rows.get(0))); - Assert.assertEquals( - "my-index-2020_03_20_04_22_14+01", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))) + .isEqualTo("my-index-2020_03_17_20_12_14+01"); + assertThat(indexGenerator.generate(rows.get(1))) + .isEqualTo("my-index-2020_03_20_04_22_14+01"); } @Test @@ -229,8 +230,8 @@ public void testGeneralDynamicIndex() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("index_{item}", schema); indexGenerator.open(); - Assert.assertEquals("index_apple", indexGenerator.generate(rows.get(0))); - Assert.assertEquals("index_peanut", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("index_apple"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("index_peanut"); } @Test @@ -238,8 +239,8 @@ public void testStaticIndex() { IndexGenerator indexGenerator = IndexGeneratorFactory.createIndexGenerator("my-index", schema); indexGenerator.open(); - Assert.assertEquals("my-index", indexGenerator.generate(rows.get(0))); - Assert.assertEquals("my-index", indexGenerator.generate(rows.get(1))); + assertThat(indexGenerator.generate(rows.get(0))).isEqualTo("my-index"); + assertThat(indexGenerator.generate(rows.get(1))).isEqualTo("my-index"); } @Test @@ -247,11 +248,12 @@ public void testUnknownField() { String expectedExceptionMsg = "Unknown field 'unknown_ts' in index pattern 'my-index-{unknown_ts|yyyy-MM-dd}'," + " please check the field name."; - try { - IndexGeneratorFactory.createIndexGenerator("my-index-{unknown_ts|yyyy-MM-dd}", schema); - } catch (TableException e) { - Assert.assertEquals(e.getMessage(), expectedExceptionMsg); - } + assertThatThrownBy( + () -> + IndexGeneratorFactory.createIndexGenerator( + "my-index-{unknown_ts|yyyy-MM-dd}", schema)) + 
.isInstanceOf(TableException.class) + .hasMessage(expectedExceptionMsg); } @Test @@ -259,11 +261,12 @@ public void testUnsupportedTimeType() { String expectedExceptionMsg = "Unsupported type 'INT' found in Elasticsearch dynamic index field, " + "time-related pattern only support types are: DATE,TIME,TIMESTAMP."; - try { - IndexGeneratorFactory.createIndexGenerator("my-index-{id|yyyy-MM-dd}", schema); - } catch (TableException e) { - Assert.assertEquals(expectedExceptionMsg, e.getMessage()); - } + assertThatThrownBy( + () -> + IndexGeneratorFactory.createIndexGenerator( + "my-index-{id|yyyy-MM-dd}", schema)) + .isInstanceOf(TableException.class) + .hasMessage(expectedExceptionMsg); } @Test @@ -271,23 +274,12 @@ public void testUnsupportedMultiParametersType() { String expectedExceptionMsg = "Chaining dynamic index pattern my-index-{local_date}-{local_time} is not supported," + " only support single dynamic index pattern."; - try { - IndexGeneratorFactory.createIndexGenerator( - "my-index-{local_date}-{local_time}", schema); - } catch (TableException e) { - Assert.assertEquals(expectedExceptionMsg, e.getMessage()); - } - } - - @Test - public void testDynamicIndexUnsupportedFormat() { - String expectedExceptionMsg = "Unsupported field: HourOfDay"; - try { - IndexGeneratorFactory.createIndexGenerator( - "my-index-{log_date|yyyy/MM/dd HH:mm}", schema); - } catch (UnsupportedTemporalTypeException e) { - Assert.assertEquals(expectedExceptionMsg, e.getMessage()); - } + assertThatThrownBy( + () -> + IndexGeneratorFactory.createIndexGenerator( + "my-index-{local_date}-{local_time}", schema)) + .isInstanceOf(TableException.class) + .hasMessage(expectedExceptionMsg); } @Test @@ -296,10 +288,9 @@ public void testUnsupportedIndexFieldType() { "Unsupported type BOOLEAN of index field, Supported types are:" + " [DATE, TIME_WITHOUT_TIME_ZONE, TIMESTAMP_WITHOUT_TIME_ZONE, TIMESTAMP_WITH_TIME_ZONE," + " TIMESTAMP_WITH_LOCAL_TIME_ZONE, VARCHAR, CHAR, TINYINT, INTEGER, BIGINT]"; - try { - IndexGeneratorFactory.createIndexGenerator("index_{status}", schema); - } catch (IllegalArgumentException e) { - Assert.assertEquals(expectedExceptionMsg, e.getMessage()); - } + assertThatThrownBy( + () -> IndexGeneratorFactory.createIndexGenerator("index_{status}", schema)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessage(expectedExceptionMsg); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/KeyExtractorTest.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/KeyExtractorTest.java index bcfb68da0b710..b7479f98004a7 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/KeyExtractorTest.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/table/KeyExtractorTest.java @@ -33,9 +33,7 @@ import java.time.LocalTime; import java.util.function.Function; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link KeyExtractor}. 
*/ public class KeyExtractorTest { @@ -51,7 +49,7 @@ public void testSimpleKey() { Function keyExtractor = KeyExtractor.createKeyExtractor(schema, "_"); String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD"))); - assertThat(key, equalTo("12")); + assertThat(key).isEqualTo("12"); } @Test @@ -65,7 +63,7 @@ public void testNoPrimaryKey() { Function keyExtractor = KeyExtractor.createKeyExtractor(schema, "_"); String key = keyExtractor.apply(GenericRowData.of(12L, StringData.fromString("ABCD"))); - assertThat(key, nullValue()); + assertThat(key).isNull(); } @Test @@ -87,7 +85,7 @@ public void testTwoFieldsKey() { StringData.fromString("ABCD"), TimestampData.fromLocalDateTime( LocalDateTime.parse("2012-12-12T12:12:12")))); - assertThat(key, equalTo("12_2012-12-12T12:12:12")); + assertThat(key).isEqualTo("12_2012-12-12T12:12:12"); } @Test @@ -127,9 +125,8 @@ public void testAllTypesKey() { TimestampData.fromInstant(Instant.parse("2013-01-13T13:13:13Z")), (int) (LocalTime.parse("14:14:14").toNanoOfDay() / 1_000_000), (int) LocalDate.parse("2015-05-15").toEpochDay())); - assertThat( - key, - equalTo( - "1_2_3_4_true_1.0_2.0_ABCD_2012-12-12T12:12:12_2013-01-13T13:13:13_14:14:14_2015-05-15")); + assertThat(key) + .isEqualTo( + "1_2_3_4_true_1.0_2.0_ABCD_2012-12-12T12:12:12_2013-01-13T13:13:13_14:14:14_2015-05-15"); } } diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/ElasticsearchResource.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/ElasticsearchResource.java index cd11b70ddfed7..f92bcf1529cb2 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/ElasticsearchResource.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/ElasticsearchResource.java @@ -33,8 +33,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** A resource that starts an embedded elasticsearch cluster. 
*/ public class ElasticsearchResource extends ExternalResource { @@ -82,8 +81,8 @@ private void waitForCluster() { ClusterHealthResponse health = healthFuture.actionGet(TimeValue.timeValueSeconds(120)); - assertThat(health.getNumberOfNodes(), greaterThanOrEqualTo(1)); - assertThat(health.getNumberOfDataNodes(), greaterThanOrEqualTo(1)); + assertThat(health.getNumberOfNodes()).isGreaterThanOrEqualTo(1); + assertThat(health.getNumberOfDataNodes()).isGreaterThanOrEqualTo(1); } @Override diff --git a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java index 584ed4d3f6e7e..33023a733ea7f 100644 --- a/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java +++ b/flink-connectors/flink-connector-elasticsearch-base/src/test/java/org/apache/flink/streaming/connectors/elasticsearch/testutils/SourceSinkDataTestKit.java @@ -30,13 +30,14 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.junit.Assert; import java.io.IOException; import java.io.Serializable; import java.util.HashMap; import java.util.Map; +import static org.assertj.core.api.Assertions.assertThat; + /** * This class contains utilities and a pre-defined source function and Elasticsearch Sink function * used to simulate and verify data used in tests. @@ -144,7 +145,7 @@ public static void verifyProducedSinkData(RestHighLevelClient client, String ind client.get( new GetRequest(index, TYPE_NAME, Integer.toString(i)), RequestOptions.DEFAULT); - Assert.assertEquals(DATA_PREFIX + i, response.getSource().get(DATA_FIELD_NAME)); + assertThat(response.getSource().get(DATA_FIELD_NAME)).isEqualTo(DATA_PREFIX + i); } } diff --git a/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java index d8053d4c9a4a7..502ad7219a249 100644 --- a/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java +++ b/flink-connectors/flink-connector-elasticsearch6/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch6DynamicSinkFactoryTest.java @@ -18,9 +18,12 @@ package org.apache.flink.connector.elasticsearch.table; +import org.apache.flink.table.api.ValidationException; + import org.junit.jupiter.api.Test; import static org.apache.flink.connector.elasticsearch.table.TestContext.context; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for validation in {@link Elasticsearch6DynamicSinkFactory}. 
*/ public class Elasticsearch6DynamicSinkFactoryTest extends ElasticsearchDynamicSinkFactoryBaseTest { @@ -42,14 +45,15 @@ TestContext createPrefilledTestContext() { public void validateEmptyConfiguration() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "One or more required options are missing.\n" - + "\n" - + "Missing required options are:\n" - + "\n" - + "document-type\n" - + "hosts\n" - + "index", - () -> sinkFactory.createDynamicTableSink(context().build())); + assertThatThrownBy(() -> sinkFactory.createDynamicTableSink(context().build())) + .isInstanceOf(ValidationException.class) + .hasMessage( + "One or more required options are missing.\n" + + "\n" + + "Missing required options are:\n" + + "\n" + + "document-type\n" + + "hosts\n" + + "index"); } } diff --git a/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java b/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java index e82477252e015..26c0df17861ce 100644 --- a/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java +++ b/flink-connectors/flink-connector-elasticsearch7/src/test/java/org/apache/flink/connector/elasticsearch/table/Elasticsearch7DynamicSinkFactoryTest.java @@ -18,9 +18,12 @@ package org.apache.flink.connector.elasticsearch.table; +import org.apache.flink.table.api.ValidationException; + import org.junit.jupiter.api.Test; import static org.apache.flink.connector.elasticsearch.table.TestContext.context; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for validation in {@link Elasticsearch7DynamicSinkFactory}. 
*/ public class Elasticsearch7DynamicSinkFactoryTest extends ElasticsearchDynamicSinkFactoryBaseTest { @@ -41,13 +44,14 @@ TestContext createPrefilledTestContext() { public void validateEmptyConfiguration() { ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory(); - assertValidationException( - "One or more required options are missing.\n" - + "\n" - + "Missing required options are:\n" - + "\n" - + "hosts\n" - + "index", - () -> sinkFactory.createDynamicTableSink(context().build())); + assertThatThrownBy(() -> sinkFactory.createDynamicTableSink(context().build())) + .isInstanceOf(ValidationException.class) + .hasMessage( + "One or more required options are missing.\n" + + "\n" + + "Missing required options are:\n" + + "\n" + + "hosts\n" + + "index"); } } From 22c6ebc2587e7a6f6c696fcd3973c43f3d5e701d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 6 May 2022 11:09:59 +0200 Subject: [PATCH 04/16] [FLINK-27185][connector] Convert connector-hive module to assertj Co-authored-by: slinkydeveloper --- .../hive/HiveDeserializeExceptionTest.java | 2 +- .../connectors/hive/HiveDialectITCase.java | 443 +++++++++--------- .../hive/HiveDialectQueryITCase.java | 160 +++---- .../hive/HiveDynamicTableFactoryTest.java | 78 ++- .../connectors/hive/HiveLookupJoinITCase.java | 40 +- .../hive/HiveOutputFormatFactoryTest.java | 6 +- .../connectors/hive/HiveRunnerITCase.java | 103 ++-- .../connectors/hive/HiveSourceITCase.java | 20 +- .../connectors/hive/HiveTableFactoryTest.java | 12 +- .../connectors/hive/HiveTableSinkITCase.java | 73 ++- .../hive/HiveTableSourceITCase.java | 250 +++++----- .../connectors/hive/PartitionMonitorTest.java | 10 +- .../hive/TableEnvHiveConnectorITCase.java | 129 ++--- .../HiveInputFormatPartitionReaderITCase.java | 12 +- .../hive/read/HivePartitionFetcherTest.java | 8 +- .../read/HiveTableFileInputFormatTest.java | 5 +- .../hive/util/HiveConfUtilsTest.java | 19 +- .../catalog/hive/HiveCatalogDataTypeTest.java | 4 +- .../hive/HiveCatalogGenericMetadataTest.java | 47 +- .../hive/HiveCatalogHiveMetadataTest.java | 65 ++- .../catalog/hive/HiveCatalogUdfITCase.java | 12 +- .../factories/HiveCatalogFactoryTest.java | 64 ++- .../catalog/hive/util/HiveTableUtilTest.java | 8 +- .../functions/hive/HiveGenericUDAFTest.java | 10 +- .../functions/hive/HiveGenericUDFTest.java | 67 +-- .../functions/hive/HiveGenericUDTFTest.java | 18 +- .../functions/hive/HiveSimpleUDFTest.java | 51 +- .../module/hive/HiveModuleFactoryTest.java | 4 +- .../table/module/hive/HiveModuleTest.java | 48 +- .../delegation/hive/HiveASTParserTest.java | 4 +- 30 files changed, 874 insertions(+), 898 deletions(-) diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java index 1f81c7ec91773..bcea45b159493 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java @@ -41,7 +41,7 @@ import java.util.Collections; import java.util.Properties; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.fail; import static org.junit.Assume.assumeTrue; /** diff --git 
a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java index 1aa6f506963cb..b92c6e7600558 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java @@ -76,15 +76,8 @@ import java.util.LinkedHashMap; import java.util.List; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Test Hive syntax when Hive dialect is used. */ public class HiveDialectITCase { @@ -123,14 +116,14 @@ public void testPluggableParser() { TableEnvironmentInternal tableEnvInternal = (TableEnvironmentInternal) tableEnv; Parser parser = tableEnvInternal.getParser(); // hive dialect should use HiveParser - assertTrue(parser instanceof HiveParser); + assertThat(parser).isInstanceOf(HiveParser.class); // execute some sql and verify the parser instance is reused tableEnvInternal.executeSql("show databases"); - assertSame(parser, tableEnvInternal.getParser()); + assertThat(tableEnvInternal.getParser()).isSameAs(parser); // switching dialect will result in a new parser tableEnvInternal.getConfig().setSqlDialect(SqlDialect.DEFAULT); - assertNotEquals( - parser.getClass().getName(), tableEnvInternal.getParser().getClass().getName()); + assertThat(tableEnvInternal.getParser().getClass().getName()) + .isNotEqualTo(parser.getClass().getName()); } @Test @@ -139,19 +132,19 @@ public void testParseCommand() { Parser parser = tableEnvInternal.getParser(); // hive dialect should use HiveParser - assertTrue(parser instanceof HiveParser); - assertThat(parser.parse("HELP").get(0), instanceOf(HelpOperation.class)); - assertThat(parser.parse("clear").get(0), instanceOf(ClearOperation.class)); - assertThat(parser.parse("SET").get(0), instanceOf(SetOperation.class)); - assertThat(parser.parse("ResET").get(0), instanceOf(ResetOperation.class)); - assertThat(parser.parse("Exit").get(0), instanceOf(QuitOperation.class)); + assertThat(parser).isInstanceOf(HiveParser.class); + assertThat(parser.parse("HELP").get(0)).isInstanceOf(HelpOperation.class); + assertThat(parser.parse("clear").get(0)).isInstanceOf(ClearOperation.class); + assertThat(parser.parse("SET").get(0)).isInstanceOf(SetOperation.class); + assertThat(parser.parse("ResET").get(0)).isInstanceOf(ResetOperation.class); + assertThat(parser.parse("Exit").get(0)).isInstanceOf(QuitOperation.class); } @Test public void testCreateDatabase() throws Exception { tableEnv.executeSql("create database db1 comment 'db1 comment'"); Database db = hiveCatalog.getHiveDatabase("db1"); - assertEquals("db1 comment", db.getDescription()); + assertThat(db.getDescription()).isEqualTo("db1 comment"); String db2Location = warehouse + "/db2_location"; tableEnv.executeSql( @@ -159,8 +152,8 @@ public void testCreateDatabase() throws Exception { "create database db2 location '%s' with dbproperties('k1'='v1')", 
db2Location)); db = hiveCatalog.getHiveDatabase("db2"); - assertEquals(db2Location, locationPath(db.getLocationUri())); - assertEquals("v1", db.getParameters().get("k1")); + assertThat(locationPath(db.getLocationUri())).isEqualTo(db2Location); + assertThat(db.getParameters().get("k1")).isEqualTo("v1"); } @Test @@ -169,26 +162,26 @@ public void testAlterDatabase() throws Exception { tableEnv.executeSql("create database db1 with dbproperties('k1'='v1')"); tableEnv.executeSql("alter database db1 set dbproperties ('k1'='v11','k2'='v2')"); Database db = hiveCatalog.getHiveDatabase("db1"); - assertEquals("v11", db.getParameters().get("k1")); - assertEquals("v2", db.getParameters().get("k2")); + assertThat(db.getParameters().get("k1")).isEqualTo("v11"); + assertThat(db.getParameters().get("k2")).isEqualTo("v2"); // alter owner tableEnv.executeSql("alter database db1 set owner user user1"); db = hiveCatalog.getHiveDatabase("db1"); - assertEquals("user1", db.getOwnerName()); - assertEquals(PrincipalType.USER, db.getOwnerType()); + assertThat(db.getOwnerName()).isEqualTo("user1"); + assertThat(db.getOwnerType()).isEqualTo(PrincipalType.USER); tableEnv.executeSql("alter database db1 set owner role role1"); db = hiveCatalog.getHiveDatabase("db1"); - assertEquals("role1", db.getOwnerName()); - assertEquals(PrincipalType.ROLE, db.getOwnerType()); + assertThat(db.getOwnerName()).isEqualTo("role1"); + assertThat(db.getOwnerType()).isEqualTo(PrincipalType.ROLE); // alter location if (hiveCatalog.getHiveVersion().compareTo("2.4.0") >= 0) { String newLocation = warehouse + "/db1_new_location"; tableEnv.executeSql(String.format("alter database db1 set location '%s'", newLocation)); db = hiveCatalog.getHiveDatabase("db1"); - assertEquals(newLocation, locationPath(db.getLocationUri())); + assertThat(locationPath(db.getLocationUri())).isEqualTo(newLocation); } } @@ -200,86 +193,85 @@ public void testCreateTable() throws Exception { "create external table tbl1 (d decimal(10,0),ts timestamp) partitioned by (p string) location '%s' tblproperties('k1'='v1')", location)); Table hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl1")); - assertEquals(TableType.EXTERNAL_TABLE.toString(), hiveTable.getTableType()); - assertEquals(1, hiveTable.getPartitionKeysSize()); - assertEquals(location, locationPath(hiveTable.getSd().getLocation())); - assertEquals("v1", hiveTable.getParameters().get("k1")); - assertFalse(hiveTable.getParameters().containsKey(SqlCreateHiveTable.TABLE_LOCATION_URI)); + assertThat(hiveTable.getTableType()).isEqualTo(TableType.EXTERNAL_TABLE.toString()); + assertThat(hiveTable.getPartitionKeysSize()).isEqualTo(1); + assertThat(locationPath(hiveTable.getSd().getLocation())).isEqualTo(location); + assertThat(hiveTable.getParameters().get("k1")).isEqualTo("v1"); + assertThat(hiveTable.getParameters()) + .doesNotContainKey(SqlCreateHiveTable.TABLE_LOCATION_URI); tableEnv.executeSql("create table tbl2 (s struct) stored as orc"); hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl2")); - assertEquals(TableType.MANAGED_TABLE.toString(), hiveTable.getTableType()); - assertEquals( - OrcSerde.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib()); - assertEquals(OrcInputFormat.class.getName(), hiveTable.getSd().getInputFormat()); - assertEquals(OrcOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat()); + assertThat(hiveTable.getTableType()).isEqualTo(TableType.MANAGED_TABLE.toString()); + 
assertThat(hiveTable.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(OrcSerde.class.getName()); + assertThat(hiveTable.getSd().getInputFormat()).isEqualTo(OrcInputFormat.class.getName()); + assertThat(hiveTable.getSd().getOutputFormat()).isEqualTo(OrcOutputFormat.class.getName()); tableEnv.executeSql( "create table tbl3 (m map) partitioned by (p1 bigint,p2 tinyint) " + "row format serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe'"); hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl3")); - assertEquals(2, hiveTable.getPartitionKeysSize()); - assertEquals( - LazyBinarySerDe.class.getName(), - hiveTable.getSd().getSerdeInfo().getSerializationLib()); + assertThat(hiveTable.getPartitionKeysSize()).isEqualTo(2); + assertThat(hiveTable.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(LazyBinarySerDe.class.getName()); tableEnv.executeSql( "create table tbl4 (x int,y smallint) row format delimited fields terminated by '|' lines terminated by '\n'"); hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl4")); - assertEquals( - "|", - hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.FIELD_DELIM)); - assertEquals( - "|", - hiveTable - .getSd() - .getSerdeInfo() - .getParameters() - .get(serdeConstants.SERIALIZATION_FORMAT)); - assertEquals( - "\n", - hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM)); + assertThat(hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.FIELD_DELIM)) + .isEqualTo("|"); + assertThat( + hiveTable + .getSd() + .getSerdeInfo() + .getParameters() + .get(serdeConstants.SERIALIZATION_FORMAT)) + .isEqualTo("|"); + assertThat(hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM)) + .isEqualTo("\n"); tableEnv.executeSql( "create table tbl5 (m map) row format delimited collection items terminated by ';' " + "map keys terminated by ':'"); hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl5")); - assertEquals( - ";", - hiveTable - .getSd() - .getSerdeInfo() - .getParameters() - .get(serdeConstants.COLLECTION_DELIM)); - assertEquals( - ":", - hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.MAPKEY_DELIM)); + assertThat( + hiveTable + .getSd() + .getSerdeInfo() + .getParameters() + .get(serdeConstants.COLLECTION_DELIM)) + .isEqualTo(";"); + assertThat( + hiveTable + .getSd() + .getSerdeInfo() + .getParameters() + .get(serdeConstants.MAPKEY_DELIM)) + .isEqualTo(":"); int createdTimeForTableExists = hiveTable.getCreateTime(); tableEnv.executeSql("create table if not exists tbl5 (m map)"); hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl5")); - assertEquals(createdTimeForTableExists, hiveTable.getCreateTime()); + assertThat(hiveTable.getCreateTime()).isEqualTo(createdTimeForTableExists); // test describe table Parser parser = ((TableEnvironmentInternal) tableEnv).getParser(); DescribeTableOperation operation = (DescribeTableOperation) parser.parse("desc tbl1").get(0); - assertFalse(operation.isExtended()); - assertEquals( - ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl1"), - operation.getSqlIdentifier()); + assertThat(operation.isExtended()).isFalse(); + assertThat(operation.getSqlIdentifier()) + .isEqualTo(ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl1")); operation = (DescribeTableOperation) parser.parse("describe default.tbl2").get(0); - assertFalse(operation.isExtended()); - assertEquals( - ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl2"), 
- operation.getSqlIdentifier()); + assertThat(operation.isExtended()).isFalse(); + assertThat(operation.getSqlIdentifier()) + .isEqualTo(ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl2")); operation = (DescribeTableOperation) parser.parse("describe extended tbl3").get(0); - assertTrue(operation.isExtended()); - assertEquals( - ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl3"), - operation.getSqlIdentifier()); + assertThat(operation.isExtended()).isTrue(); + assertThat(operation.getSqlIdentifier()) + .isEqualTo(ObjectIdentifier.of(hiveCatalog.getName(), "default", "tbl3")); } @Test @@ -291,17 +283,17 @@ public void testCreateTableWithConstraints() throws Exception { CatalogTable catalogTable = (CatalogTable) hiveCatalog.getTable(new ObjectPath("default", "tbl")); TableSchema tableSchema = catalogTable.getSchema(); - assertTrue("PK not present", tableSchema.getPrimaryKey().isPresent()); - assertEquals("pk_name", tableSchema.getPrimaryKey().get().getName()); - assertFalse( - "PK cannot be null", - tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable()); - assertFalse( - "RELY NOT NULL should be reflected in schema", - tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable()); - assertTrue( - "NORELY NOT NULL shouldn't be reflected in schema", - tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable()); + assertThat(tableSchema.getPrimaryKey()).as("PK not present").isPresent(); + assertThat(tableSchema.getPrimaryKey().get().getName()).isEqualTo("pk_name"); + assertThat(tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable()) + .as("PK cannot be null") + .isFalse(); + assertThat(tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable()) + .as("RELY NOT NULL should be reflected in schema") + .isFalse(); + assertThat(tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable()) + .as("NORELY NOT NULL shouldn't be reflected in schema") + .isTrue(); } @Test @@ -309,20 +301,20 @@ public void testCreateTableAs() throws Exception { tableEnv.executeSql("create table src (x int,y string)"); tableEnv.executeSql("create table tbl1 as select x from src group by x").await(); Table hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl1")); - assertEquals(1, hiveTable.getSd().getCols().size()); - assertEquals("x", hiveTable.getSd().getCols().get(0).getName()); - assertEquals("int", hiveTable.getSd().getCols().get(0).getType()); + assertThat(hiveTable.getSd().getCols()).hasSize(1); + assertThat(hiveTable.getSd().getCols().get(0).getName()).isEqualTo("x"); + assertThat(hiveTable.getSd().getCols().get(0).getType()).isEqualTo("int"); tableEnv.executeSql( "create table default.tbl2 stored as orc as select x,max(y) as m from src group by x order by x limit 1") .await(); hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl2")); - assertEquals(2, hiveTable.getSd().getCols().size()); - assertEquals("x", hiveTable.getSd().getCols().get(0).getName()); - assertEquals("m", hiveTable.getSd().getCols().get(1).getName()); - assertEquals( - OrcSerde.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib()); - assertEquals(OrcInputFormat.class.getName(), hiveTable.getSd().getInputFormat()); - assertEquals(OrcOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat()); + assertThat(hiveTable.getSd().getCols()).hasSize(2); + assertThat(hiveTable.getSd().getCols().get(0).getName()).isEqualTo("x"); + assertThat(hiveTable.getSd().getCols().get(1).getName()).isEqualTo("m"); + 
assertThat(hiveTable.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(OrcSerde.class.getName()); + assertThat(hiveTable.getSd().getInputFormat()).isEqualTo(OrcInputFormat.class.getName()); + assertThat(hiveTable.getSd().getOutputFormat()).isEqualTo(OrcOutputFormat.class.getName()); } @Test @@ -335,28 +327,29 @@ public void testInsert() throws Exception { tableEnv.executeSql("create table dest (x int)"); tableEnv.executeSql("insert into dest select x from src").await(); List results = queryResult(tableEnv.sqlQuery("select * from dest")); - assertEquals("[+I[1], +I[2], +I[3]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1], +I[2], +I[3]]"); tableEnv.executeSql("insert overwrite dest values (3),(4),(5)").await(); results = queryResult(tableEnv.sqlQuery("select * from dest")); - assertEquals("[+I[3], +I[4], +I[5]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[3], +I[4], +I[5]]"); // partitioned dest table tableEnv.executeSql("create table dest2 (x int) partitioned by (p1 int,p2 string)"); tableEnv.executeSql("insert into dest2 partition (p1=0,p2='static') select x from src") .await(); results = queryResult(tableEnv.sqlQuery("select * from dest2 order by x,p1,p2")); - assertEquals("[+I[1, 0, static], +I[2, 0, static], +I[3, 0, static]]", results.toString()); + assertThat(results.toString()) + .isEqualTo("[+I[1, 0, static], +I[2, 0, static], +I[3, 0, static]]"); tableEnv.executeSql("insert into dest2 partition (p1=1,p2) select x,y from src").await(); results = queryResult(tableEnv.sqlQuery("select * from dest2 order by x,p1,p2")); - assertEquals( - "[+I[1, 0, static], +I[1, 1, a], +I[2, 0, static], +I[2, 1, b], +I[3, 0, static], +I[3, 1, c]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[1, 0, static], +I[1, 1, a], +I[2, 0, static], +I[2, 1, b], +I[3, 0, static], +I[3, 1, c]]"); tableEnv.executeSql("insert overwrite dest2 partition (p1,p2) select 1,x,y from src") .await(); results = queryResult(tableEnv.sqlQuery("select * from dest2 order by x,p1,p2")); - assertEquals( - "[+I[1, 0, static], +I[1, 1, a], +I[1, 2, b], +I[1, 3, c], +I[2, 0, static], +I[2, 1, b], +I[3, 0, static], +I[3, 1, c]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[1, 0, static], +I[1, 1, a], +I[1, 2, b], +I[1, 3, c], +I[2, 0, static], +I[2, 1, b], +I[3, 0, static], +I[3, 1, c]]"); } @Test @@ -369,23 +362,23 @@ public void testAlterTable() throws Exception { // change properties tableEnv.executeSql("alter table `default`.tbl1 set tblproperties ('k2'='v2')"); Table hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals("v1", hiveTable.getParameters().get("k1")); - assertEquals("v2", hiveTable.getParameters().get("k2")); + assertThat(hiveTable.getParameters().get("k1")).isEqualTo("v1"); + assertThat(hiveTable.getParameters().get("k2")).isEqualTo("v2"); // change location String newLocation = warehouse + "/tbl1_new_location"; tableEnv.executeSql( String.format("alter table default.tbl1 set location '%s'", newLocation)); hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals(newLocation, locationPath(hiveTable.getSd().getLocation())); + assertThat(locationPath(hiveTable.getSd().getLocation())).isEqualTo(newLocation); // change file format tableEnv.executeSql("alter table tbl1 set fileformat orc"); hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals( - OrcSerde.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib()); - 
assertEquals(OrcInputFormat.class.getName(), hiveTable.getSd().getInputFormat()); - assertEquals(OrcOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat()); + assertThat(hiveTable.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(OrcSerde.class.getName()); + assertThat(hiveTable.getSd().getInputFormat()).isEqualTo(OrcInputFormat.class.getName()); + assertThat(hiveTable.getSd().getOutputFormat()).isEqualTo(OrcOutputFormat.class.getName()); // change serde tableEnv.executeSql( @@ -393,58 +386,54 @@ public void testAlterTable() throws Exception { "alter table tbl1 set serde '%s' with serdeproperties('%s'='%s')", LazyBinarySerDe.class.getName(), serdeConstants.FIELD_DELIM, "\u0001")); hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals( - LazyBinarySerDe.class.getName(), - hiveTable.getSd().getSerdeInfo().getSerializationLib()); - assertEquals( - "\u0001", - hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.FIELD_DELIM)); + assertThat(hiveTable.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(LazyBinarySerDe.class.getName()); + assertThat(hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.FIELD_DELIM)) + .isEqualTo("\u0001"); // replace columns tableEnv.executeSql( "alter table tbl1 replace columns (t tinyint,s smallint,i int,b bigint,f float,d double,num decimal," + "ts timestamp,dt date,str string,var varchar(10),ch char(123),bool boolean,bin binary)"); hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals(14, hiveTable.getSd().getColsSize()); - assertEquals("varchar(10)", hiveTable.getSd().getCols().get(10).getType()); - assertEquals("char(123)", hiveTable.getSd().getCols().get(11).getType()); + assertThat(hiveTable.getSd().getColsSize()).isEqualTo(14); + assertThat(hiveTable.getSd().getCols().get(10).getType()).isEqualTo("varchar(10)"); + assertThat(hiveTable.getSd().getCols().get(11).getType()).isEqualTo("char(123)"); tableEnv.executeSql( "alter table tbl1 replace columns (a array>,s struct, f2:map>," + "m map>)"); hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals("array>", hiveTable.getSd().getCols().get(0).getType()); - assertEquals( - "struct,f2:map>", - hiveTable.getSd().getCols().get(1).getType()); - assertEquals( - "map>", - hiveTable.getSd().getCols().get(2).getType()); + assertThat(hiveTable.getSd().getCols().get(0).getType()).isEqualTo("array>"); + assertThat(hiveTable.getSd().getCols().get(1).getType()) + .isEqualTo("struct,f2:map>"); + assertThat(hiveTable.getSd().getCols().get(2).getType()) + .isEqualTo("map>"); // add columns tableEnv.executeSql("alter table tbl1 add columns (x int,y int)"); hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals(5, hiveTable.getSd().getColsSize()); + assertThat(hiveTable.getSd().getColsSize()).isEqualTo(5); // change column tableEnv.executeSql("alter table tbl1 change column x x1 string comment 'new x col'"); hiveTable = hiveCatalog.getHiveTable(tablePath); - assertEquals(5, hiveTable.getSd().getColsSize()); + assertThat(hiveTable.getSd().getColsSize()).isEqualTo(5); FieldSchema newField = hiveTable.getSd().getCols().get(3); - assertEquals("x1", newField.getName()); - assertEquals("string", newField.getType()); + assertThat(newField.getName()).isEqualTo("x1"); + assertThat(newField.getType()).isEqualTo("string"); tableEnv.executeSql("alter table tbl1 change column y y int first"); hiveTable = hiveCatalog.getHiveTable(tablePath); newField = hiveTable.getSd().getCols().get(0); - assertEquals("y", newField.getName()); - 
assertEquals("int", newField.getType()); + assertThat(newField.getName()).isEqualTo("y"); + assertThat(newField.getType()).isEqualTo("int"); tableEnv.executeSql("alter table tbl1 change column x1 x2 timestamp after y"); hiveTable = hiveCatalog.getHiveTable(tablePath); newField = hiveTable.getSd().getCols().get(1); - assertEquals("x2", newField.getName()); - assertEquals("timestamp", newField.getType()); + assertThat(newField.getName()).isEqualTo("x2"); + assertThat(newField.getType()).isEqualTo("timestamp"); // add/replace columns cascade tableEnv.executeSql("create table tbl2 (x int) partitioned by (dt date,id bigint)"); @@ -470,28 +459,28 @@ public void testAlterTable() throws Exception { ObjectPath tablePath2 = new ObjectPath("default", "tbl2"); hiveTable = hiveCatalog.getHiveTable(tablePath2); Partition hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1); - assertEquals(2, hivePartition.getSd().getColsSize()); + assertThat(hivePartition.getSd().getColsSize()).isEqualTo(2); hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2); - assertEquals(2, hivePartition.getSd().getColsSize()); + assertThat(hivePartition.getSd().getColsSize()).isEqualTo(2); tableEnv.executeSql("alter table tbl2 add columns (ch char(5),vch varchar(9)) cascade"); hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1); - assertEquals(4, hivePartition.getSd().getColsSize()); + assertThat(hivePartition.getSd().getColsSize()).isEqualTo(4); hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2); - assertEquals(4, hivePartition.getSd().getColsSize()); + assertThat(hivePartition.getSd().getColsSize()).isEqualTo(4); // change column cascade tableEnv.executeSql("alter table tbl2 change column ch ch char(10) cascade"); hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1); - assertEquals("char(10)", hivePartition.getSd().getCols().get(2).getType()); + assertThat(hivePartition.getSd().getCols().get(2).getType()).isEqualTo("char(10)"); hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2); - assertEquals("char(10)", hivePartition.getSd().getCols().get(2).getType()); + assertThat(hivePartition.getSd().getCols().get(2).getType()).isEqualTo("char(10)"); tableEnv.executeSql("alter table tbl2 change column vch str string first cascade"); hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec1); - assertEquals("str", hivePartition.getSd().getCols().get(0).getName()); + assertThat(hivePartition.getSd().getCols().get(0).getName()).isEqualTo("str"); hivePartition = hiveCatalog.getHivePartition(hiveTable, partitionSpec2); - assertEquals("str", hivePartition.getSd().getCols().get(0).getName()); + assertThat(hivePartition.getSd().getCols().get(0).getName()).isEqualTo("str"); } @Test @@ -527,17 +516,17 @@ public void testAlterPartition() throws Exception { "alter table tbl partition (p1=1000,p2='2020-05-01') set location '%s'", location)); Partition partition = hiveCatalog.getHivePartition(hiveTable, spec1); - assertEquals(location, locationPath(partition.getSd().getLocation())); + assertThat(locationPath(partition.getSd().getLocation())).isEqualTo(location); // change file format tableEnv.executeSql( "alter table tbl partition (p1=2000,p2='2020-01-01') set fileformat rcfile"); partition = hiveCatalog.getHivePartition(hiveTable, spec2); - assertEquals( - LazyBinaryColumnarSerDe.class.getName(), - partition.getSd().getSerdeInfo().getSerializationLib()); - assertEquals(RCFileInputFormat.class.getName(), 
partition.getSd().getInputFormat()); - assertEquals(RCFileOutputFormat.class.getName(), partition.getSd().getOutputFormat()); + assertThat(partition.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(LazyBinaryColumnarSerDe.class.getName()); + assertThat(partition.getSd().getInputFormat()).isEqualTo(RCFileInputFormat.class.getName()); + assertThat(partition.getSd().getOutputFormat()) + .isEqualTo(RCFileOutputFormat.class.getName()); // change serde tableEnv.executeSql( @@ -545,12 +534,10 @@ public void testAlterPartition() throws Exception { "alter table tbl partition (p1=1000,p2='2020-05-01') set serde '%s' with serdeproperties('%s'='%s')", LazyBinarySerDe.class.getName(), serdeConstants.LINE_DELIM, "\n")); partition = hiveCatalog.getHivePartition(hiveTable, spec1); - assertEquals( - LazyBinarySerDe.class.getName(), - partition.getSd().getSerdeInfo().getSerializationLib()); - assertEquals( - "\n", - partition.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM)); + assertThat(partition.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(LazyBinarySerDe.class.getName()); + assertThat(partition.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM)) + .isEqualTo("\n"); } @Test @@ -562,28 +549,30 @@ public void testView() throws Exception { "create view v(vx) comment 'v comment' tblproperties ('k1'='v1') as select x from tbl"); ObjectPath viewPath = new ObjectPath("default", "v"); CatalogBaseTable catalogBaseTable = hiveCatalog.getTable(viewPath); - assertTrue(catalogBaseTable instanceof CatalogView); - assertEquals("vx", catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName()); - assertEquals("v1", catalogBaseTable.getOptions().get("k1")); + assertThat(catalogBaseTable).isInstanceOf(CatalogView.class); + assertThat(catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName()) + .isEqualTo("vx"); + assertThat(catalogBaseTable.getOptions().get("k1")).isEqualTo("v1"); // change properties tableEnv.executeSql("alter view v set tblproperties ('k1'='v11')"); catalogBaseTable = hiveCatalog.getTable(viewPath); - assertEquals("v11", catalogBaseTable.getOptions().get("k1")); + assertThat(catalogBaseTable.getOptions().get("k1")).isEqualTo("v11"); // change query tableEnv.executeSql("alter view v as select y from tbl"); catalogBaseTable = hiveCatalog.getTable(viewPath); - assertEquals("y", catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName()); + assertThat(catalogBaseTable.getUnresolvedSchema().getColumns().get(0).getName()) + .isEqualTo("y"); // rename tableEnv.executeSql("alter view v rename to v1"); viewPath = new ObjectPath("default", "v1"); - assertTrue(hiveCatalog.tableExists(viewPath)); + assertThat(hiveCatalog.tableExists(viewPath)).isTrue(); // drop tableEnv.executeSql("drop view v1"); - assertFalse(hiveCatalog.tableExists(viewPath)); + assertThat(hiveCatalog.tableExists(viewPath)).isFalse(); } @Test @@ -594,16 +583,15 @@ public void testFunction() throws Exception { "create function default.my_abs as '%s'", GenericUDFAbs.class.getName())); List functions = CollectionUtil.iteratorToList(tableEnv.executeSql("show functions").collect()); - assertTrue(functions.toString().contains("my_abs")); + assertThat(functions.toString()).contains("my_abs"); // call the function tableEnv.executeSql("create table src(x int)"); tableEnv.executeSql("insert into src values (1),(-1)").await(); - assertEquals( - "[+I[1], +I[1]]", - queryResult(tableEnv.sqlQuery("select my_abs(x) from src")).toString()); + 
assertThat(queryResult(tableEnv.sqlQuery("select my_abs(x) from src")).toString()) + .isEqualTo("[+I[1], +I[1]]"); // drop the function tableEnv.executeSql("drop function my_abs"); - assertFalse(hiveCatalog.functionExists(new ObjectPath("default", "my_abs"))); + assertThat(hiveCatalog.functionExists(new ObjectPath("default", "my_abs"))).isFalse(); tableEnv.executeSql("drop function if exists foo"); } @@ -615,23 +603,23 @@ public void testTemporaryFunction() throws Exception { "create temporary function temp_abs as '%s'", GenericUDFAbs.class.getName())); String[] functions = tableEnv.listUserDefinedFunctions(); - assertArrayEquals(new String[] {"temp_abs"}, functions); + assertThat(functions).isEqualTo(new String[] {"temp_abs"}); // call the function tableEnv.executeSql("create table src(x int)"); tableEnv.executeSql("insert into src values (1),(-1)").await(); - assertEquals( - "[+I[1], +I[1]]", - queryResult(tableEnv.sqlQuery("select temp_abs(x) from src")).toString()); + assertThat(queryResult(tableEnv.sqlQuery("select temp_abs(x) from src")).toString()) + .isEqualTo("[+I[1], +I[1]]"); // switch DB and the temp function can still be used tableEnv.executeSql("create database db1"); tableEnv.useDatabase("db1"); - assertEquals( - "[+I[1], +I[1]]", - queryResult(tableEnv.sqlQuery("select temp_abs(x) from `default`.src")).toString()); + assertThat( + queryResult(tableEnv.sqlQuery("select temp_abs(x) from `default`.src")) + .toString()) + .isEqualTo("[+I[1], +I[1]]"); // drop the function tableEnv.executeSql("drop temporary function temp_abs"); functions = tableEnv.listUserDefinedFunctions(); - assertEquals(0, functions.length); + assertThat(functions).isEmpty(); tableEnv.executeSql("drop temporary function if exists foo"); } @@ -643,24 +631,23 @@ public void testTemporaryFunctionUDAF() throws Exception { "create temporary function temp_count as '%s'", GenericUDAFCount.class.getName())); String[] functions = tableEnv.listUserDefinedFunctions(); - assertArrayEquals(new String[] {"temp_count"}, functions); + assertThat(functions).isEqualTo(new String[] {"temp_count"}); // call the function tableEnv.executeSql("create table src(x int)"); tableEnv.executeSql("insert into src values (1),(-1)").await(); - assertEquals( - "[+I[2]]", - queryResult(tableEnv.sqlQuery("select temp_count(x) from src")).toString()); + assertThat(queryResult(tableEnv.sqlQuery("select temp_count(x) from src")).toString()) + .isEqualTo("[+I[2]]"); // switch DB and the temp function can still be used tableEnv.executeSql("create database db1"); tableEnv.useDatabase("db1"); - assertEquals( - "[+I[2]]", - queryResult(tableEnv.sqlQuery("select temp_count(x) from `default`.src")) - .toString()); + assertThat( + queryResult(tableEnv.sqlQuery("select temp_count(x) from `default`.src")) + .toString()) + .isEqualTo("[+I[2]]"); // drop the function tableEnv.executeSql("drop temporary function temp_count"); functions = tableEnv.listUserDefinedFunctions(); - assertEquals(0, functions.length); + assertThat(functions).isEmpty(); tableEnv.executeSql("drop temporary function if exists foo"); } @@ -668,19 +655,19 @@ public void testTemporaryFunctionUDAF() throws Exception { public void testCatalog() { List catalogs = CollectionUtil.iteratorToList(tableEnv.executeSql("show catalogs").collect()); - assertEquals(2, catalogs.size()); + assertThat(catalogs).hasSize(2); tableEnv.executeSql("use catalog " + CatalogManagerMocks.DEFAULT_CATALOG); List databases = CollectionUtil.iteratorToList(tableEnv.executeSql("show databases").collect()); - 
assertEquals(1, databases.size()); - assertEquals( - "+I[" + CatalogManagerMocks.DEFAULT_DATABASE + "]", databases.get(0).toString()); + assertThat(databases).hasSize(1); + assertThat(databases.get(0).toString()) + .isEqualTo("+I[" + CatalogManagerMocks.DEFAULT_DATABASE + "]"); String catalogName = tableEnv.executeSql("show current catalog").collect().next().toString(); - assertEquals("+I[" + CatalogManagerMocks.DEFAULT_CATALOG + "]", catalogName); + assertThat(catalogName).isEqualTo("+I[" + CatalogManagerMocks.DEFAULT_CATALOG + "]"); String databaseName = tableEnv.executeSql("show current database").collect().next().toString(); - assertEquals("+I[" + CatalogManagerMocks.DEFAULT_DATABASE + "]", databaseName); + assertThat(databaseName).isEqualTo("+I[" + CatalogManagerMocks.DEFAULT_DATABASE + "]"); } @Test @@ -691,7 +678,7 @@ public void testAddDropPartitions() throws Exception { "alter table tbl add partition (dt='2020-04-30',country='china') partition (dt='2020-04-30',country='us')"); ObjectPath tablePath = new ObjectPath("default", "tbl"); - assertEquals(2, hiveCatalog.listPartitions(tablePath).size()); + assertThat(hiveCatalog.listPartitions(tablePath)).hasSize(2); String partLocation = warehouse + "/part3_location"; tableEnv.executeSql( @@ -708,11 +695,11 @@ public void testAddDropPartitions() throws Exception { } }); Partition hivePartition = hiveCatalog.getHivePartition(hiveTable, spec); - assertEquals(partLocation, locationPath(hivePartition.getSd().getLocation())); + assertThat(locationPath(hivePartition.getSd().getLocation())).isEqualTo(partLocation); tableEnv.executeSql( "alter table tbl drop partition (dt='2020-04-30',country='china'),partition (dt='2020-05-01',country='belgium')"); - assertEquals(1, hiveCatalog.listPartitions(tablePath).size()); + assertThat(hiveCatalog.listPartitions(tablePath)).hasSize(1); } @Test @@ -723,55 +710,55 @@ public void testShowPartitions() throws Exception { "alter table tbl add partition (dt='2020-04-30',country='china') partition (dt='2020-04-30',country='us')"); ObjectPath tablePath = new ObjectPath("default", "tbl"); - assertEquals(2, hiveCatalog.listPartitions(tablePath).size()); + assertThat(hiveCatalog.listPartitions(tablePath)).hasSize(2); List partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect()); - assertEquals(2, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30/country=china")); - assertTrue(partitions.toString().contains("dt=2020-04-30/country=us")); + assertThat(partitions).hasSize(2); + assertThat(partitions.toString()).contains("dt=2020-04-30/country=china"); + assertThat(partitions.toString()).contains("dt=2020-04-30/country=us"); partitions = CollectionUtil.iteratorToList( tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30')") .collect()); - assertEquals(2, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30/country=china")); - assertTrue(partitions.toString().contains("dt=2020-04-30/country=us")); + assertThat(partitions).hasSize(2); + assertThat(partitions.toString()).contains("dt=2020-04-30/country=china"); + assertThat(partitions.toString()).contains("dt=2020-04-30/country=us"); partitions = CollectionUtil.iteratorToList( tableEnv.executeSql("show partitions tbl partition (country='china')") .collect()); - assertEquals(1, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30/country=china")); + assertThat(partitions).hasSize(1); + 
assertThat(partitions.toString()).contains("dt=2020-04-30/country=china"); partitions = CollectionUtil.iteratorToList( tableEnv.executeSql( "show partitions tbl partition (dt='2020-04-30',country='china')") .collect()); - assertEquals(1, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30/country=china")); + assertThat(partitions).hasSize(1); + assertThat(partitions.toString()).contains("dt=2020-04-30/country=china"); partitions = CollectionUtil.iteratorToList( tableEnv.executeSql( "show partitions tbl partition (dt='2020-05-01',country='japan')") .collect()); - assertEquals(0, partitions.size()); + assertThat(partitions).isEmpty(); try { CollectionUtil.iteratorToList( tableEnv.executeSql( "show partitions tbl partition (de='2020-04-30',city='china')") .collect()); } catch (TableException e) { - assertEquals( - String.format( - "Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)", - hiveCatalog.getName(), tablePath), - e.getMessage()); + assertThat(e) + .hasMessage( + String.format( + "Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)", + hiveCatalog.getName(), tablePath)); } tableEnv.executeSql( "alter table tbl drop partition (dt='2020-04-30',country='china'),partition (dt='2020-04-30',country='us')"); - assertEquals(0, hiveCatalog.listPartitions(tablePath).size()); + assertThat(hiveCatalog.listPartitions(tablePath)).isEmpty(); tableEnv.executeSql("drop table tbl"); tableEnv.executeSql( @@ -781,30 +768,30 @@ public void testShowPartitions() throws Exception { partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect()); - assertEquals(2, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china")); - assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us")); + assertThat(partitions).hasSize(2); + assertThat(partitions.toString()).contains("dt=2020-04-30 01:02:03/country=china"); + assertThat(partitions.toString()).contains("dt=2020-04-30 04:05:06/country=us"); partitions = CollectionUtil.iteratorToList( tableEnv.executeSql( "show partitions tbl partition (dt='2020-04-30 01:02:03')") .collect()); - assertEquals(1, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china")); + assertThat(partitions).hasSize(1); + assertThat(partitions.toString()).contains("dt=2020-04-30 01:02:03/country=china"); partitions = CollectionUtil.iteratorToList( tableEnv.executeSql( "show partitions tbl partition (dt='2020-04-30 04:05:06')") .collect()); - assertEquals(1, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us")); + assertThat(partitions).hasSize(1); + assertThat(partitions.toString()).contains("dt=2020-04-30 04:05:06/country=us"); partitions = CollectionUtil.iteratorToList( tableEnv.executeSql( "show partitions tbl partition (dt='2020-04-30 01:02:03',country='china')") .collect()); - assertEquals(1, partitions.size()); - assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china")); + assertThat(partitions).hasSize(1); + assertThat(partitions.toString()).contains("dt=2020-04-30 01:02:03/country=china"); } @Test @@ -835,9 +822,9 @@ private void verifyUnsupportedOperation(String ddl) { fail("We don't support " + ddl); } catch (ValidationException e) { // expected - assertTrue( - "Expect UnsupportedOperationException for " + ddl, - e.getCause() instanceof UnsupportedOperationException); + 
assertThat(e.getCause()) + .as("Expect UnsupportedOperationException for " + ddl) + .isInstanceOf(UnsupportedOperationException.class); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectQueryITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectQueryITCase.java index b815bde0763dc..a41686bdd4734 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectQueryITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectQueryITCase.java @@ -50,7 +50,7 @@ import java.util.List; import java.util.stream.Collectors; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test hive query compatibility. */ public class HiveDialectQueryITCase { @@ -187,59 +187,59 @@ public void testGroupingSets() throws Exception { .sorted() .collect(Collectors.toList()); if (HiveParserUtils.legacyGrouping(hiveCatalog.getHiveConf())) { - assertEquals( - "[" - + "+I[1, 1, 3, 1]," - + " +I[1, null, 1, 1]," - + " +I[2, 2, 3, 1]," - + " +I[2, null, 1, 1]," - + " +I[3, 3, 3, 1]," - + " +I[3, null, 1, 1]," - + " +I[4, 4, 3, 1]," - + " +I[4, null, 1, 1]," - + " +I[5, 5, 3, 1]," - + " +I[5, null, 1, 1]]", - results1.toString()); - assertEquals( - "[" - + "+I[1, 1, 1, 1]," - + " +I[1, null, 1, 1]," - + " +I[2, 2, 1, 1]," - + " +I[2, null, 1, 1]," - + " +I[3, 3, 1, 1]," - + " +I[3, null, 1, 1]," - + " +I[4, 4, 1, 1]," - + " +I[4, null, 1, 1]," - + " +I[5, 5, 1, 1]," - + " +I[5, null, 1, 1]]", - results2.toString()); + assertThat(results1.toString()) + .isEqualTo( + "[" + + "+I[1, 1, 3, 1]," + + " +I[1, null, 1, 1]," + + " +I[2, 2, 3, 1]," + + " +I[2, null, 1, 1]," + + " +I[3, 3, 3, 1]," + + " +I[3, null, 1, 1]," + + " +I[4, 4, 3, 1]," + + " +I[4, null, 1, 1]," + + " +I[5, 5, 3, 1]," + + " +I[5, null, 1, 1]]"); + assertThat(results2.toString()) + .isEqualTo( + "[" + + "+I[1, 1, 1, 1]," + + " +I[1, null, 1, 1]," + + " +I[2, 2, 1, 1]," + + " +I[2, null, 1, 1]," + + " +I[3, 3, 1, 1]," + + " +I[3, null, 1, 1]," + + " +I[4, 4, 1, 1]," + + " +I[4, null, 1, 1]," + + " +I[5, 5, 1, 1]," + + " +I[5, null, 1, 1]]"); } else { - assertEquals( - "[" - + "+I[1, 1, 0, 1]," - + " +I[1, null, 1, 1]," - + " +I[2, 2, 0, 1]," - + " +I[2, null, 1, 1]," - + " +I[3, 3, 0, 1]," - + " +I[3, null, 1, 1]," - + " +I[4, 4, 0, 1]," - + " +I[4, null, 1, 1]," - + " +I[5, 5, 0, 1]," - + " +I[5, null, 1, 1]]", - results1.toString()); - assertEquals( - "[" - + "+I[1, 1, 0, 1]," - + " +I[1, null, 0, 1]," - + " +I[2, 2, 0, 1]," - + " +I[2, null, 0, 1]," - + " +I[3, 3, 0, 1]," - + " +I[3, null, 0, 1]," - + " +I[4, 4, 0, 1]," - + " +I[4, null, 0, 1]," - + " +I[5, 5, 0, 1]," - + " +I[5, null, 0, 1]]", - results2.toString()); + assertThat(results1.toString()) + .isEqualTo( + "[" + + "+I[1, 1, 0, 1]," + + " +I[1, null, 1, 1]," + + " +I[2, 2, 0, 1]," + + " +I[2, null, 1, 1]," + + " +I[3, 3, 0, 1]," + + " +I[3, null, 1, 1]," + + " +I[4, 4, 0, 1]," + + " +I[4, null, 1, 1]," + + " +I[5, 5, 0, 1]," + + " +I[5, null, 1, 1]]"); + assertThat(results2.toString()) + .isEqualTo( + "[" + + "+I[1, 1, 0, 1]," + + " +I[1, null, 0, 1]," + + " +I[2, 2, 0, 1]," + + " +I[2, null, 0, 1]," + + " +I[3, 3, 0, 1]," + + " +I[3, null, 0, 1]," + + " +I[4, 4, 0, 1]," + + " +I[4, null, 0, 1]," + + " +I[5, 5, 0, 1]," + + " +I[5, null, 0, 1]]"); } } @@ -260,29 +260,29 @@ public void testGroupingID() throws Exception { if 
(HiveParserUtils.legacyGrouping(hiveCatalog.getHiveConf())) { // the grouping function in older version (2.2.0) hive has some serious bug and is // barely usable, therefore we only care about the group__id here - assertEquals( - "[" - + "+I[1, 2, 3, 7, 1, 1]," - + " +I[1, 2, null, 3, 1, 0]," - + " +I[1, null, 3, 5, 1, 1]," - + " +I[1, null, null, 1, 1, 0]," - + " +I[null, 2, 3, 6, 0, 1]," - + " +I[null, 2, null, 2, 0, 0]," - + " +I[null, null, 3, 4, 0, 1]," - + " +I[null, null, null, 0, 0, 0]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[" + + "+I[1, 2, 3, 7, 1, 1]," + + " +I[1, 2, null, 3, 1, 0]," + + " +I[1, null, 3, 5, 1, 1]," + + " +I[1, null, null, 1, 1, 0]," + + " +I[null, 2, 3, 6, 0, 1]," + + " +I[null, 2, null, 2, 0, 0]," + + " +I[null, null, 3, 4, 0, 1]," + + " +I[null, null, null, 0, 0, 0]]"); } else { - assertEquals( - "[" - + "+I[1, 2, 3, 0, 0, 0]," - + " +I[1, 2, null, 1, 0, 1]," - + " +I[1, null, 3, 2, 0, 0]," - + " +I[1, null, null, 3, 0, 1]," - + " +I[null, 2, 3, 4, 1, 0]," - + " +I[null, 2, null, 5, 1, 1]," - + " +I[null, null, 3, 6, 1, 0]," - + " +I[null, null, null, 7, 1, 1]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[" + + "+I[1, 2, 3, 0, 0, 0]," + + " +I[1, 2, null, 1, 0, 1]," + + " +I[1, null, 3, 2, 0, 0]," + + " +I[1, null, null, 3, 0, 1]," + + " +I[null, 2, 3, 4, 1, 0]," + + " +I[null, 2, null, 5, 1, 1]," + + " +I[null, null, 3, 6, 1, 0]," + + " +I[null, null, null, 7, 1, 1]]"); } } finally { tableEnv.executeSql("drop table temp"); @@ -303,9 +303,9 @@ public void testValues() throws Exception { List result = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from test_values").collect()); - assertEquals( - "[+I[1, -2, 3, 4, 1.1, 1.1, 1.10000, 2021-08-04T16:26:33.400, 2021-08-04, null, 123, 56, false]]", - result.toString()); + assertThat(result.toString()) + .isEqualTo( + "[+I[1, -2, 3, 4, 1.1, 1.1, 1.10000, 2021-08-04T16:26:33.400, 2021-08-04, null, 123, 56, false]]"); } finally { tableEnv.executeSql("drop table test_values"); } @@ -323,7 +323,7 @@ public void testJoinInvolvingComplexType() throws Exception { tableEnv.executeSql( "select * from test2b join test2a on test2b.a = test2a.a[1]") .collect()); - assertEquals("[+I[2, [1, 2]]]", result.toString()); + assertThat(result.toString()).isEqualTo("[+I[2, [1, 2]]]"); } finally { tableEnv.executeSql("drop table test2a"); tableEnv.executeSql("drop table test2b"); @@ -476,7 +476,7 @@ private QTest(List statements, List results, boolean sortResults this.statements = statements; this.results = results; this.sortResults = sortResults; - assertEquals(statements.size(), results.size()); + assertThat(results).hasSize(statements.size()); } } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java index 90e3bc72e7cc9..03f386c375b00 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java @@ -52,10 +52,7 @@ import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_MONITOR_INTERVAL; import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_PARTITION_INCLUDE; import static 
org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_PARTITION_ORDER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Unit tests for {@link HiveDynamicTableFactory}. */ public class HiveDynamicTableFactoryTest { @@ -82,13 +79,12 @@ public void testHiveStreamingSourceOptions() throws Exception { + " tblproperties ('%s' = 'true')", STREAMING_SOURCE_ENABLE.key())); DynamicTableSource tableSource1 = getTableSource("table1"); - assertFalse(tableSource1 instanceof HiveLookupTableSource); + assertThat(tableSource1).isNotInstanceOf(HiveLookupTableSource.class); HiveTableSource tableSource = (HiveTableSource) tableSource1; Configuration configuration = new Configuration(); tableSource.catalogTable.getOptions().forEach(configuration::setString); - assertEquals( - HiveOptions.PartitionOrder.PARTITION_NAME, - configuration.get(STREAMING_SOURCE_PARTITION_ORDER)); + assertThat(configuration.get(STREAMING_SOURCE_PARTITION_ORDER)) + .isEqualTo(HiveOptions.PartitionOrder.PARTITION_NAME); // test table can't be selected when set 'streaming-source.partition.include' to 'latest' tableEnv.executeSql( @@ -98,16 +94,16 @@ public void testHiveStreamingSourceOptions() throws Exception { + " tblproperties ('%s' = 'true', '%s' = 'latest')", STREAMING_SOURCE_ENABLE.key(), STREAMING_SOURCE_PARTITION_INCLUDE.key())); DynamicTableSource tableSource2 = getTableSource("table2"); - assertTrue(tableSource2 instanceof HiveLookupTableSource); + assertThat(tableSource2).isInstanceOf(HiveLookupTableSource.class); try { tableEnv.executeSql("select * from table2"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, "The only supported 'streaming-source.partition.include' is 'all' in" - + " hive table scan, but is 'latest'") - .isPresent()); + + " hive table scan, but is 'latest'")) + .isPresent(); } // test table support 'partition-name' in option 'streaming-source.partition.order'. 
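Editorial note, not part of this patch: the converted try/catch above still passes silently if executeSql throws nothing. AssertJ's assertThatThrownBy fails in that case as well; a minimal sketch assuming the same tableEnv fixture and message text, with hasStackTraceContaining standing in loosely for the cause-chain search done by ExceptionUtils.findThrowableWithMessage:

    import static org.assertj.core.api.Assertions.assertThatThrownBy;

    // Throws AssertionError when no exception is raised, unlike a bare try/catch.
    assertThatThrownBy(() -> tableEnv.executeSql("select * from table2"))
            .hasStackTraceContaining(
                    "The only supported 'streaming-source.partition.include' is 'all' in"
                            + " hive table scan, but is 'latest'");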
@@ -118,12 +114,12 @@ public void testHiveStreamingSourceOptions() throws Exception { + " tblproperties ('%s' = 'true', '%s' = 'partition-name')", STREAMING_SOURCE_ENABLE.key(), STREAMING_SOURCE_PARTITION_ORDER.key())); DynamicTableSource tableSource3 = getTableSource("table3"); - assertTrue(tableSource3 instanceof HiveTableSource); + assertThat(tableSource3).isInstanceOf(HiveTableSource.class); HiveTableSource hiveTableSource3 = (HiveTableSource) tableSource3; Configuration configuration1 = new Configuration(); hiveTableSource3.catalogTable.getOptions().forEach(configuration1::setString); PartitionOrder partitionOrder1 = configuration1.get(STREAMING_SOURCE_PARTITION_ORDER); - assertEquals(HiveOptions.PartitionOrder.PARTITION_NAME, partitionOrder1); + assertThat(partitionOrder1).isEqualTo(HiveOptions.PartitionOrder.PARTITION_NAME); // test deprecated option key 'streaming-source.consume-order' and new key // 'streaming-source.partition-order' @@ -134,13 +130,13 @@ public void testHiveStreamingSourceOptions() throws Exception { + " tblproperties ('%s' = 'true', '%s' = 'partition-time')", STREAMING_SOURCE_ENABLE.key(), "streaming-source.consume-order")); DynamicTableSource tableSource4 = getTableSource("table4"); - assertTrue(tableSource4 instanceof HiveTableSource); + assertThat(tableSource4).isInstanceOf(HiveTableSource.class); HiveTableSource hiveTableSource = (HiveTableSource) tableSource4; Configuration configuration2 = new Configuration(); hiveTableSource.catalogTable.getOptions().forEach(configuration2::setString); PartitionOrder partitionOrder2 = configuration2.get(STREAMING_SOURCE_PARTITION_ORDER); - assertEquals(HiveOptions.PartitionOrder.PARTITION_TIME, partitionOrder2); + assertThat(partitionOrder2).isEqualTo(HiveOptions.PartitionOrder.PARTITION_TIME); } @Test @@ -151,7 +147,7 @@ public void testHiveLookupSourceOptions() throws Exception { "create table table5 (x int, y string, z int) tblproperties ('%s'='5min')", LOOKUP_JOIN_CACHE_TTL.key())); DynamicTableSource tableSource1 = getTableSource("table5"); - assertTrue(tableSource1 instanceof HiveLookupTableSource); + assertThat(tableSource1).isInstanceOf(HiveLookupTableSource.class); // test hive streaming source is a lookup source when 'streaming-source.partition.include' = // 'latest' @@ -161,18 +157,17 @@ public void testHiveLookupSourceOptions() throws Exception { + " tblproperties ('%s' = 'true', '%s' = 'latest')", STREAMING_SOURCE_ENABLE.key(), STREAMING_SOURCE_PARTITION_INCLUDE.key())); DynamicTableSource tableSource2 = getTableSource("table6"); - assertTrue(tableSource2 instanceof HiveLookupTableSource); + assertThat(tableSource2).isInstanceOf(HiveLookupTableSource.class); FileSystemLookupFunction lookupFunction = (FileSystemLookupFunction) ((HiveLookupTableSource) tableSource2).getLookupFunction(new int[][] {{0}}); // test default lookup cache ttl for streaming-source is 1 hour - assertEquals(Duration.ofHours(1), lookupFunction.getReloadInterval()); + assertThat(lookupFunction.getReloadInterval()).isEqualTo(Duration.ofHours(1)); HiveLookupTableSource lookupTableSource = (HiveLookupTableSource) tableSource2; Configuration configuration = new Configuration(); lookupTableSource.catalogTable.getOptions().forEach(configuration::setString); - assertEquals( - configuration.get(STREAMING_SOURCE_PARTITION_ORDER), - HiveOptions.PartitionOrder.PARTITION_NAME); + assertThat(HiveOptions.PartitionOrder.PARTITION_NAME) + .isEqualTo(configuration.get(STREAMING_SOURCE_PARTITION_ORDER)); // test lookup with partition-time extractor options 
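Editorial note: a few conversions in this hunk and the next wrap the expected constant in assertThat, e.g. assertThat(HiveOptions.PartitionOrder.PARTITION_NAME).isEqualTo(configuration.get(STREAMING_SOURCE_PARTITION_ORDER)). The check still holds, but AssertJ's convention is assertThat(actual).isEqualTo(expected), which keeps the failure message pointing at the right value. A minimal illustration using plain integers, assuming nothing beyond the AssertJ API:

    import static org.assertj.core.api.Assertions.assertThat;

    int actual = 41;
    int expected = 42;
    // Failure output names 42 as the expected value and 41 as the actual one;
    // swapping the arguments still compiles but reports the constant as the
    // value under test, which makes the message misleading.
    assertThat(actual).isEqualTo(expected);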
tableEnv.executeSql( @@ -193,16 +188,16 @@ public void testHiveLookupSourceOptions() throws Exception { PARTITION_TIME_EXTRACTOR_CLASS.key())); DynamicTableSource tableSource3 = getTableSource("table7"); - assertTrue(tableSource3 instanceof HiveLookupTableSource); + assertThat(tableSource3).isInstanceOf(HiveLookupTableSource.class); HiveLookupTableSource tableSource = (HiveLookupTableSource) tableSource3; Configuration configuration1 = new Configuration(); tableSource.catalogTable.getOptions().forEach(configuration1::setString); - assertEquals( - configuration1.get(STREAMING_SOURCE_PARTITION_ORDER), - HiveOptions.PartitionOrder.PARTITION_TIME); - assertEquals(configuration1.get(PARTITION_TIME_EXTRACTOR_KIND), "custom"); - assertEquals(configuration1.get(PARTITION_TIME_EXTRACTOR_CLASS), "path.to..TimeExtractor"); + assertThat(HiveOptions.PartitionOrder.PARTITION_TIME) + .isEqualTo(configuration1.get(STREAMING_SOURCE_PARTITION_ORDER)); + assertThat("custom").isEqualTo(configuration1.get(PARTITION_TIME_EXTRACTOR_KIND)); + assertThat("path.to..TimeExtractor") + .isEqualTo(configuration1.get(PARTITION_TIME_EXTRACTOR_CLASS)); tableEnv.executeSql( String.format( @@ -212,11 +207,12 @@ public void testHiveLookupSourceOptions() throws Exception { STREAMING_SOURCE_PARTITION_INCLUDE.key(), STREAMING_SOURCE_MONITOR_INTERVAL.key())); DynamicTableSource tableSource4 = getTableSource("table8"); - assertTrue(tableSource4 instanceof HiveLookupTableSource); + assertThat(tableSource4).isInstanceOf(HiveLookupTableSource.class); HiveLookupTableSource lookupTableSource4 = (HiveLookupTableSource) tableSource4; Configuration configuration4 = new Configuration(); lookupTableSource4.catalogTable.getOptions().forEach(configuration4::setString); - assertEquals(configuration4.get(STREAMING_SOURCE_MONITOR_INTERVAL), Duration.ofMinutes(5L)); + assertThat(Duration.ofMinutes(5L)) + .isEqualTo(configuration4.get(STREAMING_SOURCE_MONITOR_INTERVAL)); } @Test @@ -233,12 +229,12 @@ public void testInvalidOptions() throws Exception { try { getTableSource("table9"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, "The 'streaming-source.consume-start-offset' is not supported when " - + "set 'streaming-source.partition.include' to 'latest'") - .isPresent()); + + "set 'streaming-source.partition.include' to 'latest'")) + .isPresent(); } } @@ -266,17 +262,17 @@ public void testJobConfWithCredentials() throws Exception { HiveTableSource tableSource = (HiveTableSource) tableSource1; Token token = tableSource.getJobConf().getCredentials().getToken(hdfsDelegationTokenService); - assertNotNull(token); - assertEquals(hdfsDelegationTokenKind, token.getKind()); - assertEquals(hdfsDelegationTokenService, token.getService()); + assertThat(token).isNotNull(); + assertThat(token.getKind()).isEqualTo(hdfsDelegationTokenKind); + assertThat(token.getService()).isEqualTo(hdfsDelegationTokenService); // test table sink's jobConf with credentials DynamicTableSink tableSink1 = getTableSink("table10"); HiveTableSink tableSink = (HiveTableSink) tableSink1; token = tableSink.getJobConf().getCredentials().getToken(hdfsDelegationTokenService); - assertNotNull(token); - assertEquals(hdfsDelegationTokenKind, token.getKind()); - assertEquals(hdfsDelegationTokenService, token.getService()); + assertThat(token).isNotNull(); + assertThat(token.getKind()).isEqualTo(hdfsDelegationTokenKind); + assertThat(token.getService()).isEqualTo(hdfsDelegationTokenService); } private 
DynamicTableSource getTableSource(String tableName) throws Exception { diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveLookupJoinITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveLookupJoinITCase.java index 38834a123c3b3..5fd2a56269aa9 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveLookupJoinITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveLookupJoinITCase.java @@ -56,8 +56,7 @@ import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_MONITOR_INTERVAL; import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_PARTITION_INCLUDE; import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_PARTITION_ORDER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test lookup join of hive tables. */ public class HiveLookupJoinITCase { @@ -170,8 +169,8 @@ public void testLookupOptions() throws Exception { lookupFunction2.open(null); // verify lookup cache TTL option is properly configured - assertEquals(Duration.ofMinutes(5), lookupFunction1.getReloadInterval()); - assertEquals(Duration.ofMinutes(120), lookupFunction2.getReloadInterval()); + assertThat(lookupFunction1.getReloadInterval()).isEqualTo(Duration.ofMinutes(5)); + assertThat(lookupFunction2.getReloadInterval()).isEqualTo(Duration.ofMinutes(120)); } @Test @@ -196,7 +195,7 @@ public void testPartitionFetcherAndReader() throws Exception { PartitionFetcher.Context context = lookupFunction.getFetcherContext(); List partitions = fetcher.fetch(context); // fetch latest partition by partition-name - assertEquals(1, partitions.size()); + assertThat(partitions).hasSize(1); PartitionReader reader = lookupFunction.getPartitionReader(); reader.open(partitions); @@ -216,7 +215,7 @@ public void testPartitionFetcherAndReader() throws Exception { res.add(serializer.copy(row)); } res.sort(Comparator.comparingInt(o -> o.getInt(0))); - assertEquals("[+I(3,c,33,2020,09,31)]", res.toString()); + assertThat(res.toString()).isEqualTo("[+I(3,c,33,2020,09,31)]"); } @Test @@ -232,7 +231,7 @@ public void testLookupJoinBoundedTable() throws Exception { + " default_catalog.default_database.probe as p " + " join bounded_table for system_time as of p.p as b on p.x=b.x and p.y=b.y"); List results = CollectionUtil.iteratorToList(flinkTable.execute().collect()); - assertEquals("[+I[1, a, 10], +I[2, b, 22], +I[3, c, 33]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, a, 10], +I[2, b, 22], +I[3, c, 33]]"); } @Test @@ -256,9 +255,9 @@ public void testLookupJoinBoundedPartitionedTable() throws Exception { + " default_catalog.default_database.probe as p" + " join bounded_partition_table for system_time as of p.p as b on p.x=b.x and p.y=b.y"); List results = CollectionUtil.iteratorToList(flinkTable.execute().collect()); - assertEquals( - "[+I[1, a, 8, 2019, 08, 01], +I[1, a, 10, 2020, 08, 31], +I[2, b, 22, 2020, 08, 31]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[1, a, 8, 2019, 08, 01], +I[1, a, 10, 2020, 08, 31], +I[2, b, 22, 2020, 08, 31]]"); } @Test @@ -286,9 +285,9 @@ public void testLookupJoinPartitionedTable() throws Exception { + " default_catalog.default_database.probe as p" + " join partition_table_1 for system_time as of p.p as b on p.x=b.x 
and p.y=b.y"); List results = CollectionUtil.iteratorToList(flinkTable.execute().collect()); - assertEquals( - "[+I[1, a, 10, 2020, 09, 31], +I[2, b, 22, 2020, 09, 31], +I[3, c, 33, 2020, 09, 31]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[1, a, 10, 2020, 09, 31], +I[2, b, 22, 2020, 09, 31], +I[3, c, 33, 2020, 09, 31]]"); } @Test @@ -316,8 +315,8 @@ public void testLookupJoinPartitionedTableWithPartitionTime() throws Exception { + " default_catalog.default_database.probe as p" + " join partition_table_2 for system_time as of p.p as b on p.x=b.x and p.y=b.y"); List results = CollectionUtil.iteratorToList(flinkTable.execute().collect()); - assertEquals( - "[+I[1, a, 10, 2020, 08, 31], +I[2, b, 22, 2020, 08, 31]]", results.toString()); + assertThat(results.toString()) + .isEqualTo("[+I[1, a, 10, 2020, 08, 31], +I[2, b, 22, 2020, 08, 31]]"); } @Test @@ -354,8 +353,8 @@ public void testLookupJoinPartitionedTableWithCreateTime() throws Exception { + " default_catalog.default_database.probe as p" + " join partition_table_3 for system_time as of p.p as b on p.x=b.x and p.y=b.y"); List results = CollectionUtil.iteratorToList(flinkTable.execute().collect()); - assertEquals( - "[+I[1, a, 101, 2020, 08, 01], +I[2, b, 122, 2020, 08, 01]]", results.toString()); + assertThat(results.toString()) + .isEqualTo("[+I[1, a, 101, 2020, 08, 01], +I[2, b, 122, 2020, 08, 01]]"); } @Test @@ -385,9 +384,10 @@ public void testLookupJoinTableWithColumnarStorage() throws Exception { + "left join columnar_table for system_time as of t.p c " + "on t.x = c.x where c.x is null"); List results = CollectionUtil.iteratorToList(flinkTable.execute().collect()); - assertTrue( - "All records should be able to be joined, and the final results should be empty.", - results.size() == 0); + assertThat(results) + .as( + "All records should be able to be joined, and the final results should be empty.") + .isEmpty(); } private FileSystemLookupFunction getLookupFunction(String tableName) diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveOutputFormatFactoryTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveOutputFormatFactoryTest.java index b47296fc8dcea..50f18495d4a0e 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveOutputFormatFactoryTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveOutputFormatFactoryTest.java @@ -40,7 +40,7 @@ import java.util.Collections; import java.util.Properties; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for HiveOutputFormatFactory. 
*/ public class HiveOutputFormatFactoryTest { @@ -82,8 +82,8 @@ public FileSinkOperator.RecordWriter getHiveRecordWriter( Progressable progress) throws IOException { URI uri = finalOutPath.toUri(); - assertEquals(TEST_URI_SCHEME, uri.getScheme()); - assertEquals(TEST_URI_AUTHORITY, uri.getAuthority()); + assertThat(uri.getScheme()).isEqualTo(TEST_URI_SCHEME); + assertThat(uri.getAuthority()).isEqualTo(TEST_URI_AUTHORITY); return null; } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java index 657fd82a024b1..69a292a6efa3c 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java @@ -63,8 +63,7 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_IN_TEST; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TXN_MANAGER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** * Tests that need to run with hive runner. Since hive runner is heavy, make sure to add test cases @@ -161,8 +160,9 @@ public void testWriteComplexType() throws Exception { "insert into dest select * from default_catalog.default_database.complexSrc") .await(); List result = hiveShell.executeQuery("select * from dest"); - assertEquals(1, result.size()); - assertEquals("[1,2,3]\t{1:\"a\",2:\"b\"}\t{\"f1\":3,\"f2\":\"c\"}", result.get(0)); + assertThat(result).hasSize(1); + assertThat(result.get(0)) + .isEqualTo("[1,2,3]\t{1:\"a\",2:\"b\"}\t{\"f1\":3,\"f2\":\"c\"}"); } finally { tableEnv.executeSql("drop table dest"); } @@ -197,10 +197,10 @@ public void testWriteNestedComplexType() throws Exception { "insert into dest select * from default_catalog.default_database.nestedSrc") .await(); List result = hiveShell.executeQuery("select * from dest"); - assertEquals(1, result.size()); - assertEquals( - "[{\"f1\":1,\"f2\":\"a\"},{\"f1\":2,\"f2\":\"b\"},{\"f1\":3,\"f2\":\"c\"}]", - result.get(0)); + assertThat(result).hasSize(1); + assertThat(result.get(0)) + .isEqualTo( + "[{\"f1\":1,\"f2\":\"a\"},{\"f1\":2,\"f2\":\"b\"},{\"f1\":3,\"f2\":\"c\"}]"); } finally { tableEnv.executeSql("drop table dest"); } @@ -229,11 +229,11 @@ public void testWriteNullValues() throws Exception { tableEnv.executeSql("insert into db1.dest select * from db1.src").await(); List results = hiveShell.executeQuery("select * from db1.dest"); - assertEquals(1, results.size()); + assertThat(results).hasSize(1); String[] cols = results.get(0).split("\t"); - assertEquals(17, cols.length); - assertEquals("NULL", cols[0]); - assertEquals(1, new HashSet<>(Arrays.asList(cols)).size()); + assertThat(cols).hasSize(17); + assertThat(cols[0]).isEqualTo("NULL"); + assertThat(new HashSet<>(Arrays.asList(cols))).hasSize(1); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -336,7 +336,7 @@ public void testStaticPartition() throws Exception { tableEnv.executeSql( "insert into db1.dest partition (p1='1\\'1', p2=1.1) select x from db1.src") .await(); - assertEquals(1, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size()); + assertThat(hiveCatalog.listPartitions(new ObjectPath("db1", 
"dest"))).hasSize(1); verifyHiveQueryResult( "select * from db1.dest", Arrays.asList("1\t1'1\t1.1", "2\t1'1\t1.1")); } finally { @@ -358,7 +358,7 @@ public void testDynamicPartition() throws Exception { tableEnv.executeSql( "create table db1.dest (x int) partitioned by (p1 string, p2 double)"); tableEnv.executeSql("insert into db1.dest select * from db1.src").await(); - assertEquals(3, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size()); + assertThat(hiveCatalog.listPartitions(new ObjectPath("db1", "dest"))).hasSize(3); verifyHiveQueryResult( "select * from db1.dest", Arrays.asList("1\ta\t1.1", "2\ta\t2.2", "3\tb\t3.3")); } finally { @@ -381,7 +381,7 @@ public void testPartialDynamicPartition() throws Exception { tableEnv.executeSql( "insert into db1.dest partition (p1=1.1,p2) select x,y from db1.src") .await(); - assertEquals(2, hiveCatalog.listPartitions(new ObjectPath("db1", "dest")).size()); + assertThat(hiveCatalog.listPartitions(new ObjectPath("db1", "dest"))).hasSize(2); verifyHiveQueryResult( "select * from db1.dest", Arrays.asList("1\t1.1\ta", "2\t1.1\tb")); } finally { @@ -414,11 +414,10 @@ public void testTimestamp() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from db1.src").execute().collect()); - assertEquals(2, results.size()); - assertEquals(LocalDateTime.of(2019, 11, 11, 0, 0), results.get(0).getField(0)); - assertEquals( - LocalDateTime.of(2019, 12, 3, 15, 43, 32, 123456789), - results.get(1).getField(0)); + assertThat(results).hasSize(2); + assertThat(results.get(0).getField(0)).isEqualTo(LocalDateTime.of(2019, 11, 11, 0, 0)); + assertThat(results.get(1).getField(0)) + .isEqualTo(LocalDateTime.of(2019, 12, 3, 15, 43, 32, 123456789)); // test write timestamp to hive tableEnv.executeSql("insert into db1.dest select max(ts) from db1.src").await(); verifyHiveQueryResult( @@ -444,9 +443,9 @@ public void testDate() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from db1.src").execute().collect()); - assertEquals(2, results.size()); - assertEquals(LocalDate.of(2019, 12, 9), results.get(0).getField(0)); - assertEquals(LocalDate.of(2019, 12, 12), results.get(1).getField(0)); + assertThat(results).hasSize(2); + assertThat(results.get(0).getField(0)).isEqualTo(LocalDate.of(2019, 12, 9)); + assertThat(results.get(1).getField(0)).isEqualTo(LocalDate.of(2019, 12, 12)); // test write date to hive tableEnv.executeSql("insert into db1.dest select max(dt) from db1.src").await(); verifyHiveQueryResult( @@ -486,15 +485,16 @@ public void testViews() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select count(v) from db1.v1").execute().collect()); - assertEquals("[+I[2]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[2]]"); results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from db1.v2").execute().collect()); - assertEquals("[+I[1, 3], +I[3, 2]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, 3], +I[3, 2]]"); results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from db1.v3").execute().collect()); - assertEquals("[+I[1, key1, 3], +I[2, key2, 1], +I[3, key3, 2]]", results.toString()); + assertThat(results.toString()) + .isEqualTo("[+I[1, key1, 3], +I[2, key2, 1], +I[3, key3, 2]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -510,9 +510,8 @@ public void testWhitespacePartValue() throws Exception { stmtSet.addInsertSql("insert into db1.dest select 
1,' '"); stmtSet.addInsertSql("insert into db1.dest select 2,'a \t'"); stmtSet.execute().await(); - assertEquals( - "[p= , p=a %09]", - hiveShell.executeQuery("show partitions db1.dest").toString()); + assertThat(hiveShell.executeQuery("show partitions db1.dest").toString()) + .isEqualTo("[p= , p=a %09]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -542,18 +541,22 @@ public void testOrcSchemaEvol() throws Exception { tableEnv.getConfig().set(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true); tableEnv.executeSql("alter table db1.src change x x int"); - assertEquals( - "[+I[1, 100], +I[2, 200]]", - CollectionUtil.iteratorToList( - tableEnv.sqlQuery("select * from db1.src").execute().collect()) - .toString()); + assertThat( + CollectionUtil.iteratorToList( + tableEnv.sqlQuery("select * from db1.src") + .execute() + .collect()) + .toString()) + .isEqualTo("[+I[1, 100], +I[2, 200]]"); tableEnv.executeSql("alter table db1.src change y y string"); - assertEquals( - "[+I[1, 100], +I[2, 200]]", - CollectionUtil.iteratorToList( - tableEnv.sqlQuery("select * from db1.src").execute().collect()) - .toString()); + assertThat( + CollectionUtil.iteratorToList( + tableEnv.sqlQuery("select * from db1.src") + .execute() + .collect()) + .toString()) + .isEqualTo("[+I[1, 100], +I[2, 200]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -578,13 +581,13 @@ private void testTransactionalTable(boolean batch) { } catch (Exception e) { exceptions.add(e); } - assertEquals(2, exceptions.size()); + assertThat(exceptions).hasSize(2); exceptions.forEach( e -> { - assertTrue(e instanceof FlinkHiveException); - assertEquals( - "Reading or writing ACID table db1.dest is not supported.", - e.getMessage()); + assertThat(e).isInstanceOf(FlinkHiveException.class); + assertThat(e.getMessage()) + .isEqualTo( + "Reading or writing ACID table db1.dest is not supported."); }); } finally { tableEnv.executeSql("drop database db1 cascade"); @@ -703,13 +706,13 @@ private void readWriteFormat(String format) throws Exception { private static void verifyWrittenData(List expected, List results) throws Exception { - assertEquals(expected.size(), results.size()); + assertThat(results).hasSize(expected.size()); Set expectedSet = new HashSet<>(); for (int i = 0; i < results.size(); i++) { final String rowString = expected.get(i).toString(); expectedSet.add(rowString.substring(3, rowString.length() - 1).replaceAll(", ", "\t")); } - assertEquals(expectedSet, new HashSet<>(results)); + assertThat(new HashSet<>(results)).isEqualTo(expectedSet); } private static List generateRecords(int numRecords) { @@ -728,8 +731,8 @@ private static List generateRecords(int numRecords) { private static void verifyHiveQueryResult(String query, List expected) { List results = hiveShell.executeQuery(query); - assertEquals(expected.size(), results.size()); - assertEquals(new HashSet<>(expected), new HashSet<>(results)); + assertThat(results).hasSize(expected.size()); + assertThat(new HashSet<>(results)).isEqualTo(new HashSet<>(expected)); } private static void verifyFlinkQueryResult( @@ -750,8 +753,8 @@ private static void verifyFlinkQueryResult( .map(Object::toString) .collect(Collectors.joining("\t"))) .collect(Collectors.toList()); - assertEquals(expected.size(), results.size()); - assertEquals(new HashSet<>(expected), new HashSet<>(results)); + assertThat(results).hasSize(expected.size()); + assertThat(new HashSet<>(results)).isEqualTo(new HashSet<>(expected)); } private static String toRowValue(List row) { 
diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveSourceITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveSourceITCase.java index 7a7e580171907..64fd238cc3058 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveSourceITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveSourceITCase.java @@ -44,7 +44,7 @@ import static org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable.IDENTIFIER; import static org.apache.flink.table.factories.FactoryUtil.CONNECTOR; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link HiveSource}. */ public class HiveSourceITCase { @@ -102,9 +102,9 @@ public void testRegularRead() throws Exception { WatermarkStrategy.noWatermarks(), "HiveSource-tbl1") .executeAndCollect()); - assertEquals(2, results.size()); - assertEquals(1, results.get(0).getInt(0)); - assertEquals(2, results.get(1).getInt(0)); + assertThat(results).hasSize(2); + assertThat(results.get(0).getInt(0)).isEqualTo(1); + assertThat(results.get(1).getInt(0)).isEqualTo(2); hiveCatalog.dropTable(tablePath, false); // test partitioned table @@ -143,9 +143,9 @@ public void testRegularRead() throws Exception { WatermarkStrategy.noWatermarks(), "HiveSource-tbl2") .executeAndCollect()); - assertEquals(1, results.size()); - assertEquals(1, results.get(0).getInt(0)); - assertEquals("a", results.get(0).getString(1).toString()); + assertThat(results).hasSize(1); + assertThat(results.get(0).getInt(0)).isEqualTo(1); + assertThat(results.get(0).getString(1).toString()).isEqualTo("a"); HiveTestUtils.createTextTableInserter( hiveCatalog, tablePath.getDatabaseName(), tablePath.getObjectName()) @@ -178,9 +178,9 @@ public void testRegularRead() throws Exception { WatermarkStrategy.noWatermarks(), "HiveSource-tbl2") .executeAndCollect()); - assertEquals(1, results.size()); - assertEquals(3, results.get(0).getInt(0)); - assertEquals("b", results.get(0).getString(1).toString()); + assertThat(results).hasSize(1); + assertThat(results.get(0).getInt(0)).isEqualTo(3); + assertThat(results.get(0).getString(1).toString()).isEqualTo("b"); hiveCatalog.dropTable(tablePath, false); } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableFactoryTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableFactoryTest.java index 506dafbc491bd..612a96fe1eef6 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableFactoryTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableFactoryTest.java @@ -54,7 +54,7 @@ import java.util.Map; import java.util.Optional; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link HiveTableFactory}. 
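Editorial note on the HiveSource reads above: when several fields of one result are checked in sequence, AssertJ's SoftAssertions can collect every mismatch instead of stopping at the first failed check. A sketch only, reusing the shape of the last assertion block in HiveSourceITCase:

    import org.assertj.core.api.SoftAssertions;

    // All three checks run; failures are reported together at the end.
    SoftAssertions.assertSoftly(
            softly -> {
                softly.assertThat(results).hasSize(1);
                softly.assertThat(results.get(0).getInt(0)).isEqualTo(3);
                softly.assertThat(results.get(0).getString(1).toString()).isEqualTo("b");
            });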
*/ public class HiveTableFactoryTest { @@ -87,7 +87,7 @@ public void testGenericTable() throws Exception { catalog.createTable(new ObjectPath("mydb", "mytable"), table, true); final Optional tableFactoryOpt = catalog.getTableFactory(); - assertTrue(tableFactoryOpt.isPresent()); + assertThat(tableFactoryOpt).isPresent(); final HiveTableFactory tableFactory = (HiveTableFactory) tableFactoryOpt.get(); final TableSource tableSource = @@ -97,7 +97,7 @@ public void testGenericTable() throws Exception { table, new Configuration(), false)); - assertTrue(tableSource instanceof StreamTableSource); + assertThat(tableSource).isInstanceOf(StreamTableSource.class); final TableSink tableSink = tableFactory.createTableSink( @@ -107,7 +107,7 @@ public void testGenericTable() throws Exception { new Configuration(), true, false)); - assertTrue(tableSink instanceof StreamTableSink); + assertThat(tableSink).isInstanceOf(StreamTableSink.class); } @Test @@ -135,7 +135,7 @@ public void testHiveTable() throws Exception { new Configuration(), Thread.currentThread().getContextClassLoader(), false); - assertTrue(tableSource instanceof HiveTableSource); + assertThat(tableSource).isInstanceOf(HiveTableSource.class); final DynamicTableSink tableSink = FactoryUtil.createDynamicTableSink( @@ -146,6 +146,6 @@ public void testHiveTable() throws Exception { new Configuration(), Thread.currentThread().getContextClassLoader(), false); - assertTrue(tableSink instanceof HiveTableSink); + assertThat(tableSink).isInstanceOf(HiveTableSink.class); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java index 39f7950acbc72..e5dc44ca25f7e 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java @@ -44,7 +44,6 @@ import org.apache.flink.shaded.guava30.com.google.common.collect.Lists; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -72,9 +71,8 @@ import static org.apache.flink.table.planner.utils.TableTestUtil.replaceNodeIdInOperator; import static org.apache.flink.table.planner.utils.TableTestUtil.replaceStageId; import static org.apache.flink.table.planner.utils.TableTestUtil.replaceStreamNodeId; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests {@link HiveTableSink}. 
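Editorial note on the sink tests in the hunks below: the catch-and-fail("Failed to execute sql: ...") pattern can also be written with assertThatCode, which keeps the thrown exception as the failure cause and drops the try/catch. A sketch assuming the same statement and tEnv fixture:

    import static org.assertj.core.api.Assertions.assertThatCode;

    // Fails with the exception attached instead of only its message text.
    assertThatCode(
                    () ->
                            tEnv.executeSql(
                                            "insert into db1.sink_table select 6,'a','b','2020-05-03','12'")
                                    .await())
            .doesNotThrowAnyException();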
*/ public class HiveTableSinkITCase { @@ -131,9 +129,8 @@ private void testHiveTableSinkWithParallelismBase( "insert into test_table select 1, 1", ExplainDetail.JSON_EXECUTION_PLAN); final String expected = readFromResource(expectedResourceFileName); - assertEquals( - replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(expected))), - replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(actual)))); + assertThat(replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(actual)))) + .isEqualTo(replaceNodeIdInOperator(replaceStreamNodeId(replaceStageId(expected)))); tEnv.executeSql("drop database db1 cascade"); } @@ -153,7 +150,7 @@ public void testBatchAppend() throws Exception { CollectionUtil.iteratorToList( tEnv.executeSql("select * from append_table").collect()); rows.sort(Comparator.comparingInt(o -> (int) o.getField(0))); - Assert.assertEquals(Arrays.asList(Row.of(1, 1), Row.of(2, 2)), rows); + assertThat(rows).isEqualTo(Arrays.asList(Row.of(1, 1), Row.of(2, 2))); } finally { tEnv.executeSql("drop database db1 cascade"); } @@ -213,15 +210,13 @@ public void testStreamingAppend() throws Exception { StreamTableEnvironment tEnv = HiveTestUtils.createTableEnvInStreamingMode(env); tEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog); tEnv.useCatalog(hiveCatalog.getName()); - try { tEnv.executeSql( "insert into db1.sink_table select 6,'a','b','2020-05-03','12'") .await(); } catch (Exception e) { - Assert.fail("Failed to execute sql: " + e.getMessage()); + fail("Failed to execute sql: " + e.getMessage()); } - assertBatch( "db1.sink_table", Arrays.asList( @@ -371,11 +366,11 @@ public void testStreamingSinkWithTimestampLtzWatermark() throws Exception { } catch (InterruptedException e) { throw new RuntimeException(e); } - Assert.assertEquals( - Arrays.asList( - Row.of(expectedData.get(i - 1)).toString(), - Row.of(expectedData.get(i - 1)).toString()), - fetchRows(iter, 2)); + assertThat(fetchRows(iter, 2)) + .isEqualTo( + Arrays.asList( + Row.of(expectedData.get(i - 1)).toString(), + Row.of(expectedData.get(i - 1)).toString())); if (i < 6) { HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "source_table") @@ -418,12 +413,11 @@ public void testStreamingSinkWithoutCommitPolicy() throws Exception { fail("Streaming write partitioned table without commit policy should fail"); } catch (FlinkHiveException e) { // expected - assertTrue( - e.getMessage() - .contains( - String.format( - "Streaming write to partitioned hive table `%s`.`%s`.`%s` without providing a commit policy", - hiveCatalog.getName(), "db1", "dest"))); + assertThat(e.getMessage()) + .contains( + String.format( + "Streaming write to partitioned hive table `%s`.`%s`.`%s` without providing a commit policy", + hiveCatalog.getName(), "db1", "dest")); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -437,12 +431,12 @@ public void testCustomPartitionCommitPolicyNotFound() { testStreamingWriteWithCustomPartitionCommitPolicy(customCommitPolicyClassName); fail("ExecutionException expected"); } catch (Exception e) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( e, "Can not create new instance for custom class from " - + customCommitPolicyClassName) - .isPresent()); + + customCommitPolicyClassName)) + .isPresent(); } } @@ -454,7 +448,7 @@ public void testCustomPartitionCommitPolicy() throws Exception { private static List fetchRows(Iterator iter, int size) { List strings = new ArrayList<>(size); for (int i = 0; i < size; i++) { - 
Assert.assertTrue(iter.hasNext()); + assertThat(iter.hasNext()).isTrue(); strings.add(iter.next().toString()); } strings.sort(String::compareTo); @@ -463,12 +457,12 @@ private static List fetchRows(Iterator iter, int size) { private void checkSuccessFiles(String path) { File basePath = new File(path, "d=2020-05-03"); - Assert.assertEquals(5, basePath.list().length); - Assert.assertTrue(new File(new File(basePath, "e=7"), "_MY_SUCCESS").exists()); - Assert.assertTrue(new File(new File(basePath, "e=8"), "_MY_SUCCESS").exists()); - Assert.assertTrue(new File(new File(basePath, "e=9"), "_MY_SUCCESS").exists()); - Assert.assertTrue(new File(new File(basePath, "e=10"), "_MY_SUCCESS").exists()); - Assert.assertTrue(new File(new File(basePath, "e=11"), "_MY_SUCCESS").exists()); + assertThat(basePath.list()).hasSize(5); + assertThat(new File(new File(basePath, "e=7"), "_MY_SUCCESS").exists()).isTrue(); + assertThat(new File(new File(basePath, "e=8"), "_MY_SUCCESS").exists()).isTrue(); + assertThat(new File(new File(basePath, "e=9"), "_MY_SUCCESS").exists()).isTrue(); + assertThat(new File(new File(basePath, "e=10"), "_MY_SUCCESS").exists()).isTrue(); + assertThat(new File(new File(basePath, "e=11"), "_MY_SUCCESS").exists()).isTrue(); } private void testStreamingWriteWithCustomPartitionCommitPolicy( @@ -546,11 +540,12 @@ private void testStreamingWriteWithCustomPartitionCommitPolicy( partitionKV -> { String partitionPath = new Path(new Path(base, "d=2020-05-03"), partitionKV).toString(); - Assert.assertTrue( - "Partition(d=2020-05-03, " - + partitionKV - + ") is not committed successfully", - committedPaths.contains(partitionPath)); + assertThat(committedPaths) + .as( + "Partition(d=2020-05-03, " + + partitionKV + + ") is not committed successfully") + .contains(partitionPath); }); } finally { tEnv.executeSql("drop database if exists db1 cascade"); @@ -662,6 +657,6 @@ private void assertBatch(String table, List expected) { .forEachRemaining(r -> results.add(r.toString())); results.sort(String::compareTo); expected.sort(String::compareTo); - Assert.assertEquals(expected, results); + assertThat(results).isEqualTo(expected); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSourceITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSourceITCase.java index 40757540077fa..1b1e06834dc82 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSourceITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSourceITCase.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.mapred.JobConf; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Assume; import org.junit.Before; import org.junit.BeforeClass; @@ -80,10 +79,7 @@ import static org.apache.flink.table.catalog.hive.HiveTestUtils.createTableEnvWithHiveCatalog; import static org.apache.flink.table.planner.utils.JavaScalaConversionUtil.toScala; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; @@ -130,11 +126,11 @@ public void testReadNonPartitionedTable() 
throws Exception { Table src = batchTableEnv.sqlQuery("select * from hive.source_db.test"); List rows = CollectionUtil.iteratorToList(src.execute().collect()); - Assert.assertEquals(4, rows.size()); - Assert.assertEquals("+I[1, 1, a, 1000, 1.11]", rows.get(0).toString()); - Assert.assertEquals("+I[2, 2, b, 2000, 2.22]", rows.get(1).toString()); - Assert.assertEquals("+I[3, 3, c, 3000, 3.33]", rows.get(2).toString()); - Assert.assertEquals("+I[4, 4, d, 4000, 4.44]", rows.get(3).toString()); + assertThat(rows).hasSize(4); + assertThat(rows.get(0).toString()).isEqualTo("+I[1, 1, a, 1000, 1.11]"); + assertThat(rows.get(1).toString()).isEqualTo("+I[2, 2, b, 2000, 2.22]"); + assertThat(rows.get(2).toString()).isEqualTo("+I[3, 3, c, 3000, 3.33]"); + assertThat(rows.get(3).toString()).isEqualTo("+I[4, 4, d, 4000, 4.44]"); } @Test @@ -154,10 +150,10 @@ public void testReadComplexDataType() throws Exception { .commit(); Table src = batchTableEnv.sqlQuery("select * from hive.source_db.complex_test"); List rows = CollectionUtil.iteratorToList(src.execute().collect()); - Assert.assertEquals(1, rows.size()); - assertArrayEquals(array, (Integer[]) rows.get(0).getField(0)); - assertEquals(map, rows.get(0).getField(1)); - assertEquals(Row.of(struct[0], struct[1]), rows.get(0).getField(2)); + assertThat(rows).hasSize(1); + assertThat((Integer[]) rows.get(0).getField(0)).isEqualTo(array); + assertThat(rows.get(0).getField(1)).isEqualTo(map); + assertThat(rows.get(0).getField(2)).isEqualTo(Row.of(struct[0], struct[1])); } /** @@ -183,13 +179,13 @@ public void testReadPartitionTable() throws Exception { Table src = batchTableEnv.sqlQuery("select * from hive.source_db.test_table_pt"); List rows = CollectionUtil.iteratorToList(src.execute().collect()); - assertEquals(4, rows.size()); + assertThat(rows).hasSize(4); Object[] rowStrings = rows.stream().map(Row::toString).sorted().toArray(); - assertArrayEquals( - new String[] { - "+I[2014, 3, 0]", "+I[2014, 4, 0]", "+I[2015, 2, 1]", "+I[2015, 5, 1]" - }, - rowStrings); + assertThat(rowStrings) + .isEqualTo( + new String[] { + "+I[2014, 3, 0]", "+I[2014, 4, 0]", "+I[2015, 2, 1]", "+I[2015, 5, 1]" + }); } @Test @@ -211,17 +207,17 @@ public void testPartitionPrunning() throws Exception { batchTableEnv.sqlQuery("select * from hive.source_db.test_table_pt_1 where pt = 0"); // first check execution plan to ensure partition prunning works String[] explain = src.explain().split("==.*==\n"); - assertEquals(4, explain.length); + assertThat(explain).hasSize(4); String optimizedLogicalPlan = explain[2]; - assertTrue( - optimizedLogicalPlan, - optimizedLogicalPlan.contains( - "table=[[hive, source_db, test_table_pt_1, partitions=[{pt=0}], project=[year, value]]]")); + assertThat(optimizedLogicalPlan) + .as(optimizedLogicalPlan) + .contains( + "table=[[hive, source_db, test_table_pt_1, partitions=[{pt=0}], project=[year, value]]]"); // second check execute results List rows = CollectionUtil.iteratorToList(src.execute().collect()); - assertEquals(2, rows.size()); + assertThat(rows).hasSize(2); Object[] rowStrings = rows.stream().map(Row::toString).sorted().toArray(); - assertArrayEquals(new String[] {"+I[2014, 3, 0]", "+I[2014, 4, 0]"}, rowStrings); + assertThat(rowStrings).isEqualTo(new String[] {"+I[2014, 3, 0]", "+I[2014, 4, 0]"}); } @Test @@ -254,71 +250,69 @@ public void testPartitionFilter() throws Exception { Table query = tableEnv.sqlQuery("select x from db1.part where p1>1 or p2<>'a' order by x"); String[] explain = query.explain().split("==.*==\n"); - 
assertFalse(catalog.fallback); + assertThat(catalog.fallback).isFalse(); String optimizedPlan = explain[2]; - assertTrue( - optimizedPlan, - optimizedPlan.contains( - "table=[[test-catalog, db1, part, partitions=[{p1=2, p2=b}, {p1=3, p2=c}, {p1=4, p2=c:2}]")); + assertThat(optimizedPlan) + .as(optimizedPlan) + .contains( + "table=[[test-catalog, db1, part, partitions=[{p1=2, p2=b}, {p1=3, p2=c}, {p1=4, p2=c:2}]"); List results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[+I[2], +I[3], +I[4]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[2], +I[3], +I[4]]"); query = tableEnv.sqlQuery("select x from db1.part where p1>2 and p2<='a' order by x"); explain = query.explain().split("==.*==\n"); - assertFalse(catalog.fallback); + assertThat(catalog.fallback).isFalse(); optimizedPlan = explain[2]; - assertTrue( - optimizedPlan, - optimizedPlan.contains( - "table=[[test-catalog, db1, part, partitions=[], project=[x]]]")); + assertThat(optimizedPlan) + .as(optimizedPlan) + .contains("table=[[test-catalog, db1, part, partitions=[], project=[x]]]"); results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[]", results.toString()); + assertThat(results.toString()).isEqualTo("[]"); query = tableEnv.sqlQuery("select x from db1.part where p1 in (1,3,5) order by x"); explain = query.explain().split("==.*==\n"); - assertFalse(catalog.fallback); + assertThat(catalog.fallback).isFalse(); optimizedPlan = explain[2]; - assertTrue( - optimizedPlan, - optimizedPlan.contains( - "table=[[test-catalog, db1, part, partitions=[{p1=1, p2=a}, {p1=3, p2=c}], project=[x]]]")); + assertThat(optimizedPlan) + .as(optimizedPlan) + .contains( + "table=[[test-catalog, db1, part, partitions=[{p1=1, p2=a}, {p1=3, p2=c}], project=[x]]]"); results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[+I[1], +I[3]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1], +I[3]]"); query = tableEnv.sqlQuery( "select x from db1.part where (p1=1 and p2='a') or ((p1=2 and p2='b') or p2='d') order by x"); explain = query.explain().split("==.*==\n"); - assertFalse(catalog.fallback); + assertThat(catalog.fallback).isFalse(); optimizedPlan = explain[2]; - assertTrue( - optimizedPlan, - optimizedPlan.contains( - "table=[[test-catalog, db1, part, partitions=[{p1=1, p2=a}, {p1=2, p2=b}], project=[x]]]")); + assertThat(optimizedPlan) + .as(optimizedPlan) + .contains( + "table=[[test-catalog, db1, part, partitions=[{p1=1, p2=a}, {p1=2, p2=b}], project=[x]]]"); results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[+I[1], +I[2]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1], +I[2]]"); query = tableEnv.sqlQuery("select x from db1.part where p2 = 'c:2' order by x"); explain = query.explain().split("==.*==\n"); - assertFalse(catalog.fallback); + assertThat(catalog.fallback).isFalse(); optimizedPlan = explain[2]; - assertTrue( - optimizedPlan, - optimizedPlan.contains( - "table=[[test-catalog, db1, part, partitions=[{p1=4, p2=c:2}], project=[x]]]")); + assertThat(optimizedPlan) + .as(optimizedPlan) + .contains( + "table=[[test-catalog, db1, part, partitions=[{p1=4, p2=c:2}], project=[x]]]"); results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[+I[4]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[4]]"); query = tableEnv.sqlQuery("select x from db1.part where '' = p2"); explain = query.explain().split("==.*==\n"); - 
assertFalse(catalog.fallback); + assertThat(catalog.fallback).isFalse(); optimizedPlan = explain[2]; - assertTrue( - optimizedPlan, - optimizedPlan.contains( - "table=[[test-catalog, db1, part, partitions=[], project=[x]]]")); + assertThat(optimizedPlan) + .as(optimizedPlan) + .contains("table=[[test-catalog, db1, part, partitions=[], project=[x]]]"); results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[]", results.toString()); + assertThat(results.toString()).isEqualTo("[]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -353,21 +347,21 @@ public void testPartitionFilterDateTimestamp() throws Exception { tableEnv.sqlQuery( "select x from db1.part where p1>cast('2018-08-09' as date) and p2<>cast('2018-08-08 08:08:09.1' as timestamp)"); String[] explain = query.explain().split("==.*==\n"); - assertTrue(catalog.fallback); + assertThat(catalog.fallback).isTrue(); String optimizedPlan = explain[2]; - assertTrue( - optimizedPlan, - optimizedPlan.contains( - "table=[[test-catalog, db1, part, partitions=[{p1=2018-08-10, p2=2018-08-08 08:08:10.1}]")); + assertThat(optimizedPlan) + .as(optimizedPlan) + .contains( + "table=[[test-catalog, db1, part, partitions=[{p1=2018-08-10, p2=2018-08-08 08:08:10.1}]"); List results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[+I[3]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[3]]"); // filter by timestamp partition query = tableEnv.sqlQuery( "select x from db1.part where timestamp '2018-08-08 08:08:09.1' = p2"); results = CollectionUtil.iteratorToList(query.execute().collect()); - assertEquals("[+I[2]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[2]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -389,15 +383,15 @@ public void testProjectionPushDown() throws Exception { batchTableEnv.sqlQuery( "select p1, count(y) from hive.`default`.src group by p1"); String[] explain = table.explain().split("==.*==\n"); - assertEquals(4, explain.length); + assertThat(explain).hasSize(4); String logicalPlan = explain[2]; String expectedExplain = "table=[[hive, default, src, project=[p1, y]]]"; - assertTrue(logicalPlan, logicalPlan.contains(expectedExplain)); + assertThat(logicalPlan).as(logicalPlan).contains(expectedExplain); List rows = CollectionUtil.iteratorToList(table.execute().collect()); - assertEquals(2, rows.size()); + assertThat(rows).hasSize(2); Object[] rowStrings = rows.stream().map(Row::toString).sorted().toArray(); - assertArrayEquals(new String[] {"+I[2013, 2]", "+I[2014, 1]"}, rowStrings); + assertThat(rowStrings).isEqualTo(new String[] {"+I[2013, 2]", "+I[2014, 1]"}); } finally { batchTableEnv.executeSql("drop table src"); } @@ -415,15 +409,16 @@ public void testLimitPushDown() throws Exception { .commit(); Table table = batchTableEnv.sqlQuery("select * from hive.`default`.src limit 1"); String[] explain = table.explain().split("==.*==\n"); - assertEquals(4, explain.length); + assertThat(explain).hasSize(4); String logicalPlan = explain[2]; - assertTrue( - logicalPlan, logicalPlan.contains("table=[[hive, default, src, limit=[1]]]")); + assertThat(logicalPlan) + .as(logicalPlan) + .contains("table=[[hive, default, src, limit=[1]]]"); List rows = CollectionUtil.iteratorToList(table.execute().collect()); - assertEquals(1, rows.size()); + assertThat(rows).hasSize(1); Object[] rowStrings = rows.stream().map(Row::toString).sorted().toArray(); - assertArrayEquals(new String[] {"+I[a]"}, rowStrings); + 
assertThat(rowStrings).isEqualTo(new String[] {"+I[a]"}); } finally { batchTableEnv.executeSql("drop table src"); } @@ -486,7 +481,7 @@ private void testParallelismSettingTranslateAndAssert( .getRootNodes() .get(0); Transformation transformation = execNode.translateToPlan(planner); - Assert.assertEquals(expected, transformation.getParallelism()); + assertThat(transformation.getParallelism()).isEqualTo(expected); } @Test @@ -519,7 +514,7 @@ public void testParallelismOnLimitPushDown() throws Exception { Transformation transformation = (execNode.translateToPlan(planner).getInputs().get(0)).getInputs().get(0); // when there's no infer, should use the default parallelism configured - Assert.assertEquals(2, transformation.getParallelism()); + assertThat(transformation.getParallelism()).isEqualTo(2); } @Test @@ -553,11 +548,11 @@ public void testParallelismWithoutParallelismInfer() throws Exception { Transformation transformation = (execNode.translateToPlan(planner).getInputs().get(0)).getInputs().get(0); // when there's no infer, should use the default parallelism - Assert.assertEquals( - ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM - .defaultValue() - .intValue(), - transformation.getParallelism()); + assertThat(transformation.getParallelism()) + .isEqualTo( + ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM + .defaultValue() + .intValue()); } @Test @@ -608,8 +603,8 @@ public void testStreamPartitionReadByPartitionName() throws Exception { tEnv.executeSql("select * from hive.source_db.stream_partition_name_test"); CloseableIterator iter = result.collect(); - Assert.assertEquals( - Row.of(1, "b", "12", "2020", "09", "03").toString(), fetchRows(iter, 1).get(0)); + assertThat(fetchRows(iter, 1).get(0)) + .isEqualTo(Row.of(1, "b", "12", "2020", "09", "03").toString()); for (int i = 2; i < 6; i++) { try { @@ -622,11 +617,12 @@ public void testStreamPartitionReadByPartitionName() throws Exception { .addRow(new Object[] {i, "new_add_1", 11 + i}) .commit("pt_year='2020',pt_mon='10',pt_day='0" + i + "'"); - Assert.assertEquals( - Arrays.asList( - Row.of(i, "new_add", 11 + i, "2020", "10", "0" + i).toString(), - Row.of(i, "new_add_1", 11 + i, "2020", "10", "0" + i).toString()), - fetchRows(iter, 2)); + assertThat(fetchRows(iter, 2)) + .isEqualTo( + Arrays.asList( + Row.of(i, "new_add", 11 + i, "2020", "10", "0" + i).toString(), + Row.of(i, "new_add_1", 11 + i, "2020", "10", "0" + i) + .toString())); } result.getJobClient().get().cancel(); @@ -664,8 +660,8 @@ public void testStreamPartitionReadByCreateTime() throws Exception { tEnv.executeSql("select * from hive.source_db.stream_create_time_test"); CloseableIterator iter = result.collect(); - Assert.assertEquals( - Row.of(0, "a", "11", "A1", "B1", "C1").toString(), fetchRows(iter, 1).get(0)); + assertThat(fetchRows(iter, 1).get(0)) + .isEqualTo(Row.of(0, "a", "11", "A1", "B1", "C1").toString()); for (int i = 1; i < 6; i++) { try { @@ -678,11 +674,11 @@ public void testStreamPartitionReadByCreateTime() throws Exception { .addRow(new Object[] {i, "new_add_1", 11 + i}) .commit("p1='A',p2='B',p3='" + i + "'"); - Assert.assertEquals( - Arrays.asList( - Row.of(i, "new_add", 11 + i, "A", "B", i).toString(), - Row.of(i, "new_add_1", 11 + i, "A", "B", i).toString()), - fetchRows(iter, 2)); + assertThat(fetchRows(iter, 2)) + .isEqualTo( + Arrays.asList( + Row.of(i, "new_add", 11 + i, "A", "B", i).toString(), + Row.of(i, "new_add_1", 11 + i, "A", "B", i).toString())); } result.getJobClient().get().cancel(); } @@ -715,8 +711,8 @@ 
public void testStreamPartitionReadByPartitionTime() throws Exception { TableResult result = tEnv.executeSql("select * from hive.source_db.stream_test"); CloseableIterator iter = result.collect(); - Assert.assertEquals( - Row.of(0, "0", "2020-05-06 00:00:00").toString(), fetchRows(iter, 1).get(0)); + assertThat(fetchRows(iter, 1).get(0)) + .isEqualTo(Row.of(0, "0", "2020-05-06 00:00:00").toString()); for (int i = 1; i < 6; i++) { try { @@ -729,11 +725,13 @@ public void testStreamPartitionReadByPartitionTime() throws Exception { .addRow(new Object[] {i, i + "_copy"}) .commit("ts='2020-05-06 00:" + i + "0:00'"); - Assert.assertEquals( - Arrays.asList( - Row.of(i, String.valueOf(i), "2020-05-06 00:" + i + "0:00").toString(), - Row.of(i, i + "_copy", "2020-05-06 00:" + i + "0:00").toString()), - fetchRows(iter, 2)); + assertThat(fetchRows(iter, 2)) + .isEqualTo( + Arrays.asList( + Row.of(i, String.valueOf(i), "2020-05-06 00:" + i + "0:00") + .toString(), + Row.of(i, i + "_copy", "2020-05-06 00:" + i + "0:00") + .toString())); } result.getJobClient().get().cancel(); @@ -742,7 +740,7 @@ public void testStreamPartitionReadByPartitionTime() throws Exception { private static List fetchRows(Iterator iter, int size) { List strings = new ArrayList<>(size); for (int i = 0; i < size; i++) { - Assert.assertTrue(iter.hasNext()); + assertThat(iter.hasNext()).isTrue(); strings.add(iter.next().toString()); } strings.sort(String::compareTo); @@ -793,9 +791,8 @@ private void testNonPartitionStreamingSource(Boolean useMapredReader, String tbl .executeSql( "insert into table source_db." + tblName + " values (1,'a'), (2,'b')") .await(); - Assert.assertEquals( - Arrays.asList(Row.of(1, "a").toString(), Row.of(2, "b").toString()), - fetchRows(iter, 2)); + assertThat(fetchRows(iter, 2)) + .isEqualTo(Arrays.asList(Row.of(1, "a").toString(), Row.of(2, "b").toString())); } result.getJobClient().get().cancel(); @@ -808,12 +805,12 @@ private void testSourceConfig(boolean fallbackMR, boolean inferParallelism) thro doAnswer( invocation -> { TableSourceFactory.Context context = invocation.getArgument(0); - assertEquals( - fallbackMR, - context.getConfiguration() - .get( - HiveOptions - .TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER)); + assertThat( + context.getConfiguration() + .get( + HiveOptions + .TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER)) + .isEqualTo(fallbackMR); return new TestConfigSource( new JobConf(hiveCatalog.getHiveConf()), context.getConfiguration(), @@ -838,7 +835,7 @@ private void testSourceConfig(boolean fallbackMR, boolean inferParallelism) thro List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from db1.src order by x").execute().collect()); - assertEquals("[+I[1, a], +I[2, b]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, a], +I[2, b]]"); } @Test @@ -865,8 +862,8 @@ private void testCaseInsensitive(String format) throws Exception { String.format( "create external table parquet_t (i int, j int) stored as %s location '%s'", format, folderURI)); - Assert.assertEquals( - Row.of(1, 2), tEnv.executeSql("select * from parquet_t").collect().next()); + assertThat(tEnv.executeSql("select * from parquet_t").collect().next()) + .isEqualTo(Row.of(1, 2)); } @Test(timeout = 120000) @@ -901,7 +898,7 @@ public void testStreamReadWithProjectPushDown() throws Exception { "select x, y from hive.source_db.stream_project_pushdown_test where pt_year = '2020'"); CloseableIterator iter = result.collect(); - Assert.assertEquals(Row.of(1, "b").toString(), fetchRows(iter, 1).get(0)); + 
assertThat(fetchRows(iter, 1).get(0)).isEqualTo(Row.of(1, "b").toString()); for (int i = 2; i < 6; i++) { try { @@ -914,10 +911,11 @@ public void testStreamReadWithProjectPushDown() throws Exception { .addRow(new Object[] {i, "new_add_1", 11 + i}) .commit("pt_year='2020',pt_mon='10',pt_day='0" + i + "'"); - Assert.assertEquals( - Arrays.asList( - Row.of(i, "new_add").toString(), Row.of(i, "new_add_1").toString()), - fetchRows(iter, 2)); + assertThat(fetchRows(iter, 2)) + .isEqualTo( + Arrays.asList( + Row.of(i, "new_add").toString(), + Row.of(i, "new_add_1").toString())); } result.getJobClient().get().cancel(); @@ -950,7 +948,7 @@ public DataStream getDataStream( DataStreamSource dataStream = (DataStreamSource) super.getDataStream(providerContext, execEnv); int parallelism = dataStream.getTransformation().getParallelism(); - assertEquals(inferParallelism ? 1 : 2, parallelism); + assertThat(parallelism).isEqualTo(inferParallelism ? 1 : 2); return dataStream; } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/PartitionMonitorTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/PartitionMonitorTest.java index 0816092e9ae3b..25075d760de92 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/PartitionMonitorTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/PartitionMonitorTest.java @@ -43,8 +43,7 @@ import java.util.Properties; import java.util.stream.Collectors; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link ContinuousHiveSplitEnumerator.PartitionMonitor}. */ public class PartitionMonitorTest { @@ -80,10 +79,9 @@ public void testPartitionWithSameCreateTime() throws Exception { private void assertPartitionEquals( Collection> expected, Collection> actual) { - assertTrue(expected != null && actual != null && expected.size() == actual.size()); - assertArrayEquals( - expected.stream().map(Object::toString).sorted().toArray(), - actual.stream().map(Object::toString).sorted().toArray()); + assertThat(expected != null && actual != null && expected.size() == actual.size()).isTrue(); + assertThat(actual.stream().map(Object::toString).sorted().toArray()) + .isEqualTo(expected.stream().map(Object::toString).sorted().toArray()); } private void commitPartitionWithGivenCreateTime( diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/TableEnvHiveConnectorITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/TableEnvHiveConnectorITCase.java index 349264d071d24..8ea26b0d0e986 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/TableEnvHiveConnectorITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/TableEnvHiveConnectorITCase.java @@ -52,9 +52,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test hive connector with table API. 
*/ public class TableEnvHiveConnectorITCase { @@ -90,7 +88,7 @@ public void testOverwriteWithEmptySource() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from dest").collect()); - assertEquals(0, results.size()); + assertThat(results).isEmpty(); // dynamic partitioned table tableEnv.executeSql("create table destp (x int) partitioned by (p int)"); HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "destp") @@ -104,14 +102,14 @@ public void testOverwriteWithEmptySource() throws Exception { results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from destp order by x").collect()); - assertEquals("[+I[1, 1], +I[2, 2]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, 1], +I[2, 2]]"); // static partitioned table tableEnv.executeSql("insert overwrite table destp partition(p=1) select x from src") .await(); results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from destp order by x").collect()); - assertEquals("[+I[1, 1], +I[2, 2]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, 1], +I[2, 2]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -151,8 +149,8 @@ public void testMultiInputBroadcast() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from dest order by key").collect()); - assertEquals( - "[+I[3, val3], +I[3, val3], +I[4, null], +I[4, null]]", results.toString()); + assertThat(results.toString()) + .isEqualTo("[+I[3, val3], +I[3, val3], +I[4, null], +I[4, null]]"); } finally { tableEnv.useDatabase("default"); tableEnv.executeSql("drop database db1 cascade"); @@ -177,12 +175,12 @@ public void testDefaultPartitionName() throws Exception { Table hiveTable = hmsClient.getTable("db1", "part"); Path defaultPartPath = new Path(hiveTable.getSd().getLocation(), "y=" + defaultPartName); FileSystem fs = defaultPartPath.getFileSystem(hiveConf); - assertTrue(fs.exists(defaultPartPath)); + assertThat(fs.exists(defaultPartPath)).isTrue(); TableImpl flinkTable = (TableImpl) tableEnv.sqlQuery("select y, x from db1.part order by x"); List rows = CollectionUtil.iteratorToList(flinkTable.execute().collect()); - assertEquals("[+I[1, 1], +I[null, 2]]", rows.toString()); + assertThat(rows.toString()).isEqualTo("[+I[1, 1], +I[null, 2]]"); tableEnv.executeSql("drop database db1 cascade"); } @@ -219,9 +217,9 @@ public void testDateTimestampPartitionColumns() throws Exception { tableEnv.sqlQuery("select * from db1.part order by x") .execute() .collect()); - assertEquals( - "[+I[1, 2019-12-23, 2019-12-23T00:00], +I[2, 2019-12-23, 2019-12-23T00:00], +I[3, 2019-12-25, 2019-12-25T16:23:43.012]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[1, 2019-12-23, 2019-12-23T00:00], +I[2, 2019-12-23, 2019-12-23T00:00], +I[3, 2019-12-25, 2019-12-25T16:23:43.012]]"); results = CollectionUtil.iteratorToList( @@ -229,7 +227,7 @@ public void testDateTimestampPartitionColumns() throws Exception { "select x from db1.part where dt=cast('2019-12-25' as date)") .execute() .collect()); - assertEquals("[+I[3]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[3]]"); tableEnv.executeSql( "insert into db1.part select 4,cast('2019-12-31' as date),cast('2019-12-31 12:00:00.0' as timestamp)") @@ -237,7 +235,7 @@ public void testDateTimestampPartitionColumns() throws Exception { results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select max(dt) from db1.part").execute().collect()); - 
assertEquals("[+I[2019-12-31]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[2019-12-31]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -281,14 +279,14 @@ public void testUDTF() throws Exception { "select x from db1.simple, lateral table(hiveudtf(a)) as T(x)") .execute() .collect()); - assertEquals("[+I[1], +I[2], +I[3]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1], +I[2], +I[3]]"); results = CollectionUtil.iteratorToList( tableEnv.sqlQuery( "select x from db1.nested, lateral table(hiveudtf(a)) as T(x)") .execute() .collect()); - assertEquals("[+I[{1=a, 2=b}], +I[{3=c}]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[{1=a, 2=b}], +I[{3=c}]]"); tableEnv.executeSql("create table db1.ts (a array)"); HiveTestUtils.createTextTableInserter(hiveCatalog, "db1", "ts") @@ -306,7 +304,8 @@ public void testUDTF() throws Exception { "select x from db1.ts, lateral table(hiveudtf(a)) as T(x)") .execute() .collect()); - assertEquals("[+I[2015-04-28T15:23], +I[2016-06-03T17:05:52]]", results.toString()); + assertThat(results.toString()) + .isEqualTo("[+I[2015-04-28T15:23], +I[2016-06-03T17:05:52]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); tableEnv.executeSql("drop function hiveudtf"); @@ -323,15 +322,15 @@ public void testNotNullConstraints() throws Exception { "create table db1.tbl (x int,y bigint not null enable rely,z string not null enable norely)"); CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl")); TableSchema tableSchema = catalogTable.getSchema(); - assertTrue( - "By default columns should be nullable", - tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable()); - assertFalse( - "NOT NULL columns should be reflected in table schema", - tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable()); - assertTrue( - "NOT NULL NORELY columns should be considered nullable", - tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable()); + assertThat(tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable()) + .as("By default columns should be nullable") + .isTrue(); + assertThat(tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable()) + .as("NOT NULL columns should be reflected in table schema") + .isFalse(); + assertThat(tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable()) + .as("NOT NULL NORELY columns should be considered nullable") + .isTrue(); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -351,23 +350,23 @@ public void testPKConstraint() throws Exception { "create table db1.tbl1 (x tinyint,y smallint,z int, primary key (x,z) disable novalidate rely)"); CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl1")); TableSchema tableSchema = catalogTable.getSchema(); - assertTrue(tableSchema.getPrimaryKey().isPresent()); + assertThat(tableSchema.getPrimaryKey()).isPresent(); UniqueConstraint pk = tableSchema.getPrimaryKey().get(); - assertEquals(2, pk.getColumns().size()); - assertTrue(pk.getColumns().containsAll(Arrays.asList("x", "z"))); + assertThat(pk.getColumns()).hasSize(2); + assertThat(pk.getColumns().containsAll(Arrays.asList("x", "z"))).isTrue(); // test norely PK constraints tableEnv.executeSql( "create table db1.tbl2 (x tinyint,y smallint, primary key (x) disable norely)"); catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl2")); tableSchema = catalogTable.getSchema(); - assertFalse(tableSchema.getPrimaryKey().isPresent()); + 
assertThat(tableSchema.getPrimaryKey()).isNotPresent(); // test table w/o PK tableEnv.executeSql("create table db1.tbl3 (x tinyint)"); catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl3")); tableSchema = catalogTable.getSchema(); - assertFalse(tableSchema.getPrimaryKey().isPresent()); + assertThat(tableSchema.getPrimaryKey()).isNotPresent(); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -386,13 +385,13 @@ public void testRegexSerDe() throws Exception { .addRow(new Object[] {1, "a"}) .addRow(new Object[] {2, "ab"}) .commit(); - assertEquals( - "[+I[1, a], +I[2, ab]]", - CollectionUtil.iteratorToList( - tableEnv.sqlQuery("select * from db1.src order by x") - .execute() - .collect()) - .toString()); + assertThat( + CollectionUtil.iteratorToList( + tableEnv.sqlQuery("select * from db1.src order by x") + .execute() + .collect()) + .toString()) + .isEqualTo("[+I[1, a], +I[2, ab]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -408,11 +407,13 @@ public void testUpdatePartitionSD() throws Exception { tableEnv.executeSql("insert overwrite db1.dest partition (p='1') select 1").await(); tableEnv.executeSql("alter table db1.dest set fileformat sequencefile"); tableEnv.executeSql("insert overwrite db1.dest partition (p='1') select 1").await(); - assertEquals( - "[+I[1, 1]]", - CollectionUtil.iteratorToList( - tableEnv.sqlQuery("select * from db1.dest").execute().collect()) - .toString()); + assertThat( + CollectionUtil.iteratorToList( + tableEnv.sqlQuery("select * from db1.dest") + .execute() + .collect()) + .toString()) + .isEqualTo("[+I[1, 1]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -432,16 +433,20 @@ public void testParquetNameMapping() throws Exception { "create table db1.t2 (y int,x int) stored as parquet location '%s'", location)); tableEnv.getConfig().set(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true); - assertEquals( - "[+I[1], +I[2]]", - CollectionUtil.iteratorToList( - tableEnv.sqlQuery("select x from db1.t1").execute().collect()) - .toString()); - assertEquals( - "[+I[1], +I[2]]", - CollectionUtil.iteratorToList( - tableEnv.sqlQuery("select x from db1.t2").execute().collect()) - .toString()); + assertThat( + CollectionUtil.iteratorToList( + tableEnv.sqlQuery("select x from db1.t1") + .execute() + .collect()) + .toString()) + .isEqualTo("[+I[1], +I[2]]"); + assertThat( + CollectionUtil.iteratorToList( + tableEnv.sqlQuery("select x from db1.t2") + .execute() + .collect()) + .toString()) + .isEqualTo("[+I[1], +I[2]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -474,7 +479,7 @@ public void testNonExistingPartitionFolder() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from db1.part").execute().collect()); - assertEquals("[+I[1, 1]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, 1]]"); } finally { tableEnv.executeSql("drop database db1 cascade"); } @@ -492,7 +497,7 @@ public void testInsertPartitionWithStarSource() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from dest").execute().collect()); - assertEquals("[+I[1, 1, a]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, 1, a]]"); tableEnv.executeSql("drop table if exists src"); tableEnv.executeSql("drop table if exists dest"); } @@ -505,7 +510,7 @@ public void testInsertPartitionWithValuesSource() throws Exception { List results = CollectionUtil.iteratorToList( 
tableEnv.sqlQuery("select * from dest").execute().collect()); - assertEquals("[+I[1, 1, a]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, 1, a]]"); tableEnv.executeSql("drop table if exists dest"); } @@ -525,7 +530,7 @@ public void testDynamicPartWithOrderBy() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from dest").collect()); - assertEquals("[+I[1, 0], +I[2, 0]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, 0], +I[2, 0]]"); } finally { tableEnv.executeSql("drop table src"); tableEnv.executeSql("drop table dest"); @@ -545,7 +550,7 @@ public void testLocationWithComma() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from tbl1").collect()); - assertEquals("[+I[1], +I[2]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1], +I[2]]"); // test partition location tableEnv.executeSql("create table tbl2 (x int) partitioned by (p string)"); location = tempFolder.newFolder(","); @@ -557,13 +562,13 @@ public void testLocationWithComma() throws Exception { results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from tbl2").collect()); - assertEquals("[+I[1, a], +I[2, a]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1, a], +I[2, a]]"); tableEnv.executeSql("insert into tbl2 partition (p) values (3,'b ,')").await(); results = CollectionUtil.iteratorToList( tableEnv.executeSql("select * from tbl2 where p='b ,'").collect()); - assertEquals("[+I[3, b ,]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[3, b ,]]"); } finally { if (location != null) { IOUtils.deleteFileQuietly(location.toPath()); @@ -591,7 +596,7 @@ public void testReadEmptyCollectionFromParquet() throws Exception { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select * from src_t").execute().collect()); - assertEquals("[+I[hehuiyuan, null, null]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[hehuiyuan, null, null]]"); } finally { tableEnv.executeSql("drop table if exists src_t"); } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveInputFormatPartitionReaderITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveInputFormatPartitionReaderITCase.java index aac9007cf107f..d3abde47e217a 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveInputFormatPartitionReaderITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveInputFormatPartitionReaderITCase.java @@ -36,7 +36,7 @@ import java.util.Collections; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for HiveInputFormatPartitionReader. 
*/ public class HiveInputFormatPartitionReaderITCase { @@ -86,11 +86,11 @@ private void testReadFormat(TableEnvironment tableEnv, HiveCatalog hiveCatalog, while (partitionReader.read(reuse) != null) { count++; } - assertEquals( - CollectionUtil.iteratorToList( - tableEnv.executeSql("select * from " + tableName).collect()) - .size(), - count); + assertThat(count) + .isEqualTo( + CollectionUtil.iteratorToList( + tableEnv.executeSql("select * from " + tableName).collect()) + .size()); } private String prepareData(TableEnvironment tableEnv, String format) throws Exception { diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HivePartitionFetcherTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HivePartitionFetcherTest.java index 46c98ec39c014..ec9ddfbfdab37 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HivePartitionFetcherTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HivePartitionFetcherTest.java @@ -45,7 +45,7 @@ import java.util.Optional; import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_PARTITION_ORDER; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for hive partition fetch implementations. */ public class HivePartitionFetcherTest { @@ -92,7 +92,7 @@ public void testIgnoreNonExistPartition() throws Exception { flinkConf, defaultPartName); fetcherContext.open(); - assertEquals(0, fetcherContext.getComparablePartitionValueList().size()); + assertThat(fetcherContext.getComparablePartitionValueList()).isEmpty(); // test create-time order flinkConf.set(STREAMING_SOURCE_PARTITION_ORDER, HiveOptions.PartitionOrder.CREATE_TIME); @@ -107,7 +107,7 @@ public void testIgnoreNonExistPartition() throws Exception { flinkConf, defaultPartName); fetcherContext.open(); - assertEquals(0, fetcherContext.getComparablePartitionValueList().size()); + assertThat(fetcherContext.getComparablePartitionValueList()).isEmpty(); // test partition-name order flinkConf.set(STREAMING_SOURCE_PARTITION_ORDER, HiveOptions.PartitionOrder.PARTITION_NAME); @@ -122,7 +122,7 @@ public void testIgnoreNonExistPartition() throws Exception { flinkConf, defaultPartName); fetcherContext.open(); - assertEquals(0, fetcherContext.getComparablePartitionValueList().size()); + assertThat(fetcherContext.getComparablePartitionValueList()).isEmpty(); } private static class MyHivePartitionFetcherContext diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveTableFileInputFormatTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveTableFileInputFormatTest.java index b2a24853558f4..9d71971e119a8 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveTableFileInputFormatTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/read/HiveTableFileInputFormatTest.java @@ -23,7 +23,6 @@ import org.apache.flink.util.FileUtils; import org.apache.hadoop.mapred.FileSplit; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -31,6 +30,8 @@ import java.io.File; import java.io.IOException; +import static org.assertj.core.api.Assertions.assertThat; + /** Test for {@link 
HiveTableFileInputFormat}. */ public class HiveTableFileInputFormatTest { @@ -42,6 +43,6 @@ public void testSplit() throws IOException { FileUtils.writeFileUtf8(file, "hahahahahahaha"); FileInputSplit split = new FileInputSplit(0, new Path(file.getPath()), 0, -1, null); FileSplit fileSplit = HiveTableFileInputFormat.toHadoopFileSplit(split); - Assert.assertEquals(14, fileSplit.getLength()); + assertThat(fileSplit.getLength()).isEqualTo(14); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/util/HiveConfUtilsTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/util/HiveConfUtilsTest.java index bc13505dcc873..73ca94e9a86e3 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/util/HiveConfUtilsTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/util/HiveConfUtilsTest.java @@ -19,7 +19,6 @@ package org.apache.flink.connectors.hive.util; import org.apache.hadoop.hive.conf.HiveConf; -import org.junit.Assert; import org.junit.Test; import java.io.ByteArrayInputStream; @@ -27,6 +26,8 @@ import java.io.InputStream; import java.nio.charset.StandardCharsets; +import static org.assertj.core.api.Assertions.assertThat; + /** Test for {@link HiveConfUtils}. */ public class HiveConfUtilsTest { private static final String HIVE_SITE_CONTENT = @@ -42,17 +43,19 @@ public class HiveConfUtilsTest { @Test public void testCreateHiveConf() { HiveConf hiveConf = createHiveConf(); - Assert.assertTrue(hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)); + assertThat(hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)).isTrue(); // will override configurations from `hiveConf` with hive default values which default value // is null or empty string - Assert.assertFalse( - new HiveConf(hiveConf, HiveConf.class) - .getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)); + assertThat( + new HiveConf(hiveConf, HiveConf.class) + .getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)) + .isFalse(); - Assert.assertTrue( - HiveConfUtils.create(hiveConf) - .getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)); + assertThat( + HiveConfUtils.create(hiveConf) + .getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL)) + .isTrue(); } private HiveConf createHiveConf() { diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogDataTypeTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogDataTypeTest.java index d5ea1be94a329..99edaec7c51fc 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogDataTypeTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogDataTypeTest.java @@ -42,7 +42,7 @@ import java.util.HashMap; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for data type mappings in HiveCatalog. 
*/ public class HiveCatalogDataTypeTest { @@ -212,7 +212,7 @@ private void verifyDataTypes(DataType[] types) throws Exception { catalog.createDatabase(db1, createDb(), false); catalog.createTable(path1, table, false); - assertEquals(table.getSchema(), catalog.getTable(path1).getSchema()); + assertThat(catalog.getTable(path1).getSchema()).isEqualTo(table.getSchema()); } private static CatalogDatabase createDb() { diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogGenericMetadataTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogGenericMetadataTest.java index b3948af7d6fdd..764023dae91b1 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogGenericMetadataTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogGenericMetadataTest.java @@ -44,8 +44,7 @@ import java.util.Collections; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +import static org.assertj.core.api.Assertions.assertThat; /** Test for HiveCatalog on generic metadata. */ public class HiveCatalogGenericMetadataTest extends HiveCatalogMetadataTestBase { @@ -81,7 +80,7 @@ public void testGenericTableSchema() throws Exception { new CatalogTableImpl(tableSchema, getBatchTableProperties(), TEST_COMMENT), false); - assertEquals(tableSchema, catalog.getTable(tablePath).getSchema()); + assertThat(catalog.getTable(tablePath).getSchema()).isEqualTo(tableSchema); } finally { catalog.dropTable(tablePath, true); } @@ -121,7 +120,7 @@ public void testTableSchemaCompatibility() throws Exception { hiveTable.getParameters().put("flink.generic.table.schema.7.data-type", "DOUBLE"); ((HiveCatalog) catalog).client.createTable(hiveTable); CatalogBaseTable catalogBaseTable = catalog.getTable(tablePath); - assertFalse(HiveCatalog.isHiveTable(catalogBaseTable.getOptions())); + assertThat(HiveCatalog.isHiveTable(catalogBaseTable.getOptions())).isFalse(); TableSchema expectedSchema = TableSchema.builder() .fields( @@ -137,7 +136,7 @@ public void testTableSchemaCompatibility() throws Exception { }) .field("cost", DataTypes.DOUBLE(), "`d` * `bi`") .build(); - assertEquals(expectedSchema, catalogBaseTable.getSchema()); + assertThat(catalogBaseTable.getSchema()).isEqualTo(expectedSchema); // table with character types tablePath = new ObjectPath(db1, "generic2"); @@ -187,7 +186,7 @@ public void testTableSchemaCompatibility() throws Exception { }) .field("len", DataTypes.INT(), "CHAR_LENGTH(`s`)") .build(); - assertEquals(expectedSchema, catalogBaseTable.getSchema()); + assertThat(catalogBaseTable.getSchema()).isEqualTo(expectedSchema); // table with date/time types tablePath = new ObjectPath(db1, "generic3"); @@ -233,7 +232,7 @@ public void testTableSchemaCompatibility() throws Exception { }) .watermark("ts", "ts", DataTypes.TIMESTAMP(3)) .build(); - assertEquals(expectedSchema, catalogBaseTable.getSchema()); + assertThat(catalogBaseTable.getSchema()).isEqualTo(expectedSchema); // table with complex/misc types tablePath = new ObjectPath(db1, "generic4"); @@ -293,7 +292,7 @@ public void testTableSchemaCompatibility() throws Exception { }) .watermark("ts", "`ts` - INTERVAL '5' SECOND", DataTypes.TIMESTAMP(3)) .build(); - assertEquals(expectedSchema, catalogBaseTable.getSchema()); + assertThat(catalogBaseTable.getSchema()).isEqualTo(expectedSchema); } finally { 
catalog.dropDatabase(db1, true, true); } @@ -315,8 +314,8 @@ public void testFunctionCompatibility() throws Exception { FunctionType.JAVA, new ArrayList<>())); CatalogFunction catalogFunction = catalog.getFunction(path1); - assertEquals("class.name", catalogFunction.getClassName()); - assertEquals(FunctionLanguage.JAVA, catalogFunction.getFunctionLanguage()); + assertThat(catalogFunction.getClassName()).isEqualTo("class.name"); + assertThat(catalogFunction.getFunctionLanguage()).isEqualTo(FunctionLanguage.JAVA); } @Test @@ -332,8 +331,8 @@ public void testGenericTableWithoutConnectorProp() throws Exception { CatalogTable catalogTable = new CatalogTableImpl(tableSchema, Collections.emptyMap(), null); catalog.createTable(path1, catalogTable, false); CatalogTable retrievedTable = (CatalogTable) catalog.getTable(path1); - assertEquals(tableSchema, retrievedTable.getSchema()); - assertEquals(Collections.emptyMap(), retrievedTable.getOptions()); + assertThat(retrievedTable.getSchema()).isEqualTo(tableSchema); + assertThat(retrievedTable.getOptions()).isEmpty(); } // ------ functions ------ @@ -345,25 +344,25 @@ public void testFunctionWithNonExistClass() throws Exception { CatalogFunction catalogFunction = new CatalogFunctionImpl("non.exist.scala.class", FunctionLanguage.SCALA); catalog.createFunction(path1, catalogFunction, false); - assertEquals(catalogFunction.getClassName(), catalog.getFunction(path1).getClassName()); - assertEquals( - catalogFunction.getFunctionLanguage(), - catalog.getFunction(path1).getFunctionLanguage()); + assertThat(catalog.getFunction(path1).getClassName()) + .isEqualTo(catalogFunction.getClassName()); + assertThat(catalog.getFunction(path1).getFunctionLanguage()) + .isEqualTo(catalogFunction.getFunctionLanguage()); // alter the function catalogFunction = new CatalogFunctionImpl("non.exist.java.class", FunctionLanguage.JAVA); catalog.alterFunction(path1, catalogFunction, false); - assertEquals(catalogFunction.getClassName(), catalog.getFunction(path1).getClassName()); - assertEquals( - catalogFunction.getFunctionLanguage(), - catalog.getFunction(path1).getFunctionLanguage()); + assertThat(catalog.getFunction(path1).getClassName()) + .isEqualTo(catalogFunction.getClassName()); + assertThat(catalog.getFunction(path1).getFunctionLanguage()) + .isEqualTo(catalogFunction.getFunctionLanguage()); catalogFunction = new CatalogFunctionImpl("non.exist.python.class", FunctionLanguage.PYTHON); catalog.alterFunction(path1, catalogFunction, false); - assertEquals(catalogFunction.getClassName(), catalog.getFunction(path1).getClassName()); - assertEquals( - catalogFunction.getFunctionLanguage(), - catalog.getFunction(path1).getFunctionLanguage()); + assertThat(catalog.getFunction(path1).getClassName()) + .isEqualTo(catalogFunction.getClassName()); + assertThat(catalog.getFunction(path1).getFunctionLanguage()) + .isEqualTo(catalogFunction.getFunctionLanguage()); } // ------ partitions ------ diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogHiveMetadataTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogHiveMetadataTest.java index cd98c332f3347..422814f999859 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogHiveMetadataTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogHiveMetadataTest.java @@ -63,17 +63,13 @@ import org.junit.Test; import 
java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable.IDENTIFIER; import static org.apache.flink.table.factories.FactoryUtil.CONNECTOR; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test for HiveCatalog on Hive metadata. */ public class HiveCatalogHiveMetadataTest extends HiveCatalogMetadataTestBase { @@ -100,9 +96,9 @@ public void testCreateTable_StorageFormatSet() throws Exception { String inputFormat = hiveTable.getSd().getInputFormat(); String outputFormat = hiveTable.getSd().getOutputFormat(); String serde = hiveTable.getSd().getSerdeInfo().getSerializationLib(); - assertFalse(StringUtils.isNullOrWhitespaceOnly(inputFormat)); - assertFalse(StringUtils.isNullOrWhitespaceOnly(outputFormat)); - assertFalse(StringUtils.isNullOrWhitespaceOnly(serde)); + assertThat(StringUtils.isNullOrWhitespaceOnly(inputFormat)).isFalse(); + assertThat(StringUtils.isNullOrWhitespaceOnly(outputFormat)).isFalse(); + assertThat(StringUtils.isNullOrWhitespaceOnly(serde)).isFalse(); } // ------ table and column stats ------ @@ -144,12 +140,12 @@ public void testViewCompatibility() throws Exception { ((HiveCatalog) catalog).client.createTable(hiveView); CatalogBaseTable baseTable = catalog.getTable(path1); - assertTrue(baseTable instanceof CatalogView); + assertThat(baseTable).isInstanceOf(CatalogView.class); CatalogView catalogView = (CatalogView) baseTable; - assertEquals(schema, catalogView.getUnresolvedSchema()); - assertEquals(originQuery, catalogView.getOriginalQuery()); - assertEquals(expandedQuery, catalogView.getExpandedQuery()); - assertEquals("v1", catalogView.getOptions().get("k1")); + assertThat(catalogView.getUnresolvedSchema()).isEqualTo(schema); + assertThat(catalogView.getOriginalQuery()).isEqualTo(originQuery); + assertThat(catalogView.getExpandedQuery()).isEqualTo(expandedQuery); + assertThat(catalogView.getOptions().get("k1")).isEqualTo("v1"); // test mark as non-generic with connector hiveView.setDbName(path3.getDatabaseName()); @@ -159,12 +155,12 @@ public void testViewCompatibility() throws Exception { ((HiveCatalog) catalog).client.createTable(hiveView); baseTable = catalog.getTable(path3); - assertTrue(baseTable instanceof CatalogView); + assertThat(baseTable).isInstanceOf(CatalogView.class); catalogView = (CatalogView) baseTable; - assertEquals(schema, catalogView.getUnresolvedSchema()); - assertEquals(originQuery, catalogView.getOriginalQuery()); - assertEquals(expandedQuery, catalogView.getExpandedQuery()); - assertEquals("v1", catalogView.getOptions().get("k1")); + assertThat(catalogView.getUnresolvedSchema()).isEqualTo(schema); + assertThat(catalogView.getOriginalQuery()).isEqualTo(originQuery); + assertThat(catalogView.getExpandedQuery()).isEqualTo(expandedQuery); + assertThat(catalogView.getOptions().get("k1")).isEqualTo("v1"); } @Test @@ -270,13 +266,16 @@ public void testCreateTableWithConstraints() throws Exception { new CatalogTableImpl(builder.build(), getBatchTableProperties(), null), false); CatalogTable catalogTable = (CatalogTable) hiveCatalog.getTable(path1); - assertTrue("PK not present", catalogTable.getSchema().getPrimaryKey().isPresent()); + assertThat(catalogTable.getSchema().getPrimaryKey()).as("PK not present").isPresent(); 
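For orientation, the conversions above all follow a small set of AssertJ idioms; a minimal, self-contained sketch of those idioms (class name and values are illustrative only, not taken from these tests):

    import java.util.Optional;

    import static org.assertj.core.api.Assertions.assertThat;

    /** Illustrative sketch of the assertion idioms adopted in this patch. */
    public class AssertJStyleSketch {
        public static void main(String[] args) {
            // assertEquals(expected, actual) becomes assertThat(actual).isEqualTo(expected)
            assertThat(1 + 1).isEqualTo(2);

            // assertTrue(message, condition) becomes assertThat(condition).as(message).isTrue()
            assertThat("part".startsWith("p")).as("name should start with 'p'").isTrue();

            // assertTrue(optional.isPresent()) becomes the dedicated Optional assertion
            Optional<String> pk = Optional.of("pk_name");
            assertThat(pk).isPresent();
        }
    }

Note that .as(...) must be chained before the terminal assertion for the description to be reported on failure, which is why the converted assertions place it between assertThat(...) and isTrue()/isFalse().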
UniqueConstraint pk = catalogTable.getSchema().getPrimaryKey().get(); - assertEquals("pk_name", pk.getName()); - assertEquals(Collections.singletonList("x"), pk.getColumns()); - assertFalse(catalogTable.getSchema().getFieldDataTypes()[0].getLogicalType().isNullable()); - assertFalse(catalogTable.getSchema().getFieldDataTypes()[1].getLogicalType().isNullable()); - assertTrue(catalogTable.getSchema().getFieldDataTypes()[2].getLogicalType().isNullable()); + assertThat(pk.getName()).isEqualTo("pk_name"); + assertThat(pk.getColumns()).containsExactly("x"); + assertThat(catalogTable.getSchema().getFieldDataTypes()[0].getLogicalType().isNullable()) + .isFalse(); + assertThat(catalogTable.getSchema().getFieldDataTypes()[1].getLogicalType().isNullable()) + .isFalse(); + assertThat(catalogTable.getSchema().getFieldDataTypes()[2].getLogicalType().isNullable()) + .isTrue(); hiveCatalog.dropDatabase(db1, false, true); } @@ -288,11 +287,10 @@ public void testAlterPartition() throws Exception { catalog.createTable(path1, createPartitionedTable(), false); catalog.createPartition(path1, createPartitionSpec(), createPartition(), false); - assertEquals( - Collections.singletonList(createPartitionSpec()), catalog.listPartitions(path1)); + assertThat(catalog.listPartitions(path1)).containsExactly(createPartitionSpec()); CatalogPartition cp = catalog.getPartition(path1, createPartitionSpec()); CatalogTestUtil.checkEquals(createPartition(), cp); - assertNull(cp.getProperties().get("k")); + assertThat(cp.getProperties().get("k")).isNull(); CatalogPartition another = createPartition(); another.getProperties().put("k", "v"); @@ -303,13 +301,12 @@ public void testAlterPartition() throws Exception { catalog.alterPartition(path1, createPartitionSpec(), another, false); - assertEquals( - Collections.singletonList(createPartitionSpec()), catalog.listPartitions(path1)); + assertThat(catalog.listPartitions(path1)).containsExactly(createPartitionSpec()); cp = catalog.getPartition(path1, createPartitionSpec()); CatalogTestUtil.checkEquals(another, cp); - assertEquals("v", cp.getProperties().get("k")); + assertThat(cp.getProperties().get("k")).isEqualTo("v"); } private void checkStatistics(int inputStat, int expectStat) throws Exception { @@ -327,10 +324,10 @@ private void checkStatistics(int inputStat, int expectStat) throws Exception { catalog.createTable(path1, catalogTable, false); CatalogTableStatistics statistics = catalog.getTableStatistics(path1); - assertEquals(expectStat, statistics.getRowCount()); - assertEquals(expectStat, statistics.getFileCount()); - assertEquals(expectStat, statistics.getRawDataSize()); - assertEquals(expectStat, statistics.getTotalSize()); + assertThat(statistics.getRowCount()).isEqualTo(expectStat); + assertThat(statistics.getFileCount()).isEqualTo(expectStat); + assertThat(statistics.getRawDataSize()).isEqualTo(expectStat); + assertThat(statistics.getTotalSize()).isEqualTo(expectStat); } // ------ utils ------ diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogUdfITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogUdfITCase.java index c3efe604b34d9..45896775962fe 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogUdfITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/HiveCatalogUdfITCase.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hive.ql.udf.UDFYear; 
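The collection and exception conversions in this patch follow the same pattern; a hedged, standalone sketch of those idioms (class name, values, and exception type are illustrative, not from these tests):

    import java.util.Arrays;
    import java.util.Collections;

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.assertj.core.api.Assertions.assertThatThrownBy;

    /** Illustrative sketch of the collection and exception idioms used in this migration. */
    public class AssertJCollectionSketch {
        public static void main(String[] args) {
            // assertEquals(Collections.singletonList(x), list) becomes containsExactly(x)
            assertThat(Arrays.asList("x")).containsExactly("x");

            // assertEquals(2, list.size()) becomes hasSize(2); an expected size of 0 becomes isEmpty()
            assertThat(Arrays.asList("a", "b")).hasSize(2);
            assertThat(Collections.emptyList()).isEmpty();

            // try { ...; fail(); } catch (SomeException e) {} becomes assertThatThrownBy(...)
            assertThatThrownBy(() -> { throw new IllegalStateException("boom"); })
                    .isInstanceOf(IllegalStateException.class);
        }
    }
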
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSum; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -69,6 +68,7 @@ import java.util.stream.Stream; import static java.lang.String.format; +import static org.assertj.core.api.Assertions.assertThat; /** * IT case for HiveCatalog. TODO: move to flink-connector-hive-test end-to-end test module once it's @@ -223,7 +223,7 @@ private void testUdf(boolean batch) throws Exception { results = new ArrayList<>(results); results.sort(String::compareTo); - Assert.assertEquals(Arrays.asList("1,1,2,2", "2,2,4,4", "3,3,6,6"), results); + assertThat(results).isEqualTo(Arrays.asList("1,1,2,2", "2,2,4,4", "3,3,6,6")); } @Test @@ -246,8 +246,8 @@ public void testTimestampUDF() throws Exception { tableEnv.sqlQuery("select myyear(ts) as y from src") .execute() .collect()); - Assert.assertEquals(2, results.size()); - Assert.assertEquals("[+I[2013], +I[2019]]", results.toString()); + assertThat(results).hasSize(2); + assertThat(results.toString()).isEqualTo("[+I[2013], +I[2019]]"); } finally { tableEnv.executeSql("drop table src"); } @@ -273,8 +273,8 @@ public void testDateUDF() throws Exception { tableEnv.sqlQuery("select mymonth(dt) as m from src order by m") .execute() .collect()); - Assert.assertEquals(2, results.size()); - Assert.assertEquals("[+I[1], +I[3]]", results.toString()); + assertThat(results).hasSize(2); + assertThat(results.toString()).isEqualTo("[+I[1], +I[3]]"); } finally { tableEnv.executeSql("drop table src"); } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/factories/HiveCatalogFactoryTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/factories/HiveCatalogFactoryTest.java index c0fe191b35abf..b44519534e0b9 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/factories/HiveCatalogFactoryTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/factories/HiveCatalogFactoryTest.java @@ -29,7 +29,6 @@ import org.apache.flink.util.TestLogger; import org.apache.hadoop.hive.conf.HiveConf; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -48,9 +47,8 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.AssertionsForClassTypes.assertThatThrownBy; /** Test for {@link HiveCatalog} created by {@link HiveCatalogFactory}. 
*/ public class HiveCatalogFactoryTest extends TestLogger { @@ -80,11 +78,11 @@ public void testCreateHiveCatalog() { FactoryUtil.createCatalog( catalogName, options, null, Thread.currentThread().getContextClassLoader()); - assertEquals( - "dummy-hms", - ((HiveCatalog) actualCatalog) - .getHiveConf() - .getVar(HiveConf.ConfVars.METASTOREURIS)); + assertThat( + ((HiveCatalog) actualCatalog) + .getHiveConf() + .getVar(HiveConf.ConfVars.METASTOREURIS)) + .isEqualTo("dummy-hms"); checkEquals(expectedCatalog, (HiveCatalog) actualCatalog); } @@ -112,7 +110,7 @@ public void testCreateHiveCatalogWithHadoopConfDir() throws IOException { catalogName, options, null, Thread.currentThread().getContextClassLoader()); checkEquals(expectedCatalog, (HiveCatalog) actualCatalog); - assertEquals(mapredVal, ((HiveCatalog) actualCatalog).getHiveConf().get(mapredKey)); + assertThat(((HiveCatalog) actualCatalog).getHiveConf().get(mapredKey)).isEqualTo(mapredVal); } @Test @@ -120,23 +118,19 @@ public void testCreateHiveCatalogWithIllegalHadoopConfDir() throws IOException { final String catalogName = "mycatalog"; final String hadoopConfDir = tempFolder.newFolder().getAbsolutePath(); + final Map options = new HashMap<>(); + options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER); + options.put(HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath()); + options.put(HiveCatalogFactoryOptions.HADOOP_CONF_DIR.key(), hadoopConfDir); - try { - final Map options = new HashMap<>(); - options.put( - CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER); - options.put(HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath()); - options.put(HiveCatalogFactoryOptions.HADOOP_CONF_DIR.key(), hadoopConfDir); - - final Catalog actualCatalog = - FactoryUtil.createCatalog( - catalogName, - options, - null, - Thread.currentThread().getContextClassLoader()); - Assert.fail(); - } catch (ValidationException e) { - } + assertThatThrownBy( + () -> + FactoryUtil.createCatalog( + catalogName, + options, + null, + Thread.currentThread().getContextClassLoader())) + .isInstanceOf(ValidationException.class); } @Test @@ -188,7 +182,7 @@ public void testLoadHadoopConfigFromEnv() throws IOException { } // validate the result for (String key : customProps.keySet()) { - assertEquals(customProps.get(key), hiveConf.get(key, null)); + assertThat(hiveConf.get(key, null)).isEqualTo(customProps.get(key)); } } @@ -247,22 +241,22 @@ public void testCreateMultipleHiveCatalog() throws Exception { HiveCatalog catalog2 = (HiveCatalog) future2.get(); // verify we read our own props - assertEquals("val1", catalog1.getHiveConf().get("key")); - assertNotNull(catalog1.getHiveConf().get("conf1", null)); + assertThat(catalog1.getHiveConf().get("key")).isEqualTo("val1"); + assertThat(catalog1.getHiveConf().get("conf1", null)).isNotNull(); // verify we don't read props from other conf - assertNull(catalog1.getHiveConf().get("conf2", null)); + assertThat(catalog1.getHiveConf().get("conf2", null)).isNull(); // verify we read our own props - assertEquals("val2", catalog2.getHiveConf().get("key")); - assertNotNull(catalog2.getHiveConf().get("conf2", null)); + assertThat(catalog2.getHiveConf().get("key")).isEqualTo("val2"); + assertThat(catalog2.getHiveConf().get("conf2", null)).isNotNull(); // verify we don't read props from other conf - assertNull(catalog2.getHiveConf().get("conf1", null)); + assertThat(catalog2.getHiveConf().get("conf1", null)).isNull(); } private static void 
checkEquals(HiveCatalog c1, HiveCatalog c2) { // Only assert a few selected properties for now - assertEquals(c1.getName(), c2.getName()); - assertEquals(c1.getDefaultDatabase(), c2.getDefaultDatabase()); + assertThat(c2.getName()).isEqualTo(c1.getName()); + assertThat(c2.getDefaultDatabase()).isEqualTo(c1.getDefaultDatabase()); } private static void writeProperty(File file, String key, String value) throws IOException { diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/util/HiveTableUtilTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/util/HiveTableUtilTest.java index 12b793843c92f..977fe3bdcc476 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/util/HiveTableUtilTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/catalog/hive/util/HiveTableUtilTest.java @@ -33,7 +33,7 @@ import java.util.Optional; import static org.apache.flink.table.expressions.ApiExpressionUtils.valueLiteral; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for HiveTableUtil. */ public class HiveTableUtilTest { @@ -64,12 +64,12 @@ public void testMakePartitionFilter() { DataTypes.BOOLEAN()); Optional filter = HiveTableUtil.makePartitionFilter(2, partColNames, Arrays.asList(p1Exp), hiveShim); - assertEquals("(p1 = 1)", filter.orElse(null)); + assertThat(filter.orElse(null)).isEqualTo("(p1 = 1)"); filter = HiveTableUtil.makePartitionFilter( 2, partColNames, Arrays.asList(p1Exp, p3Exp), hiveShim); - assertEquals("(p1 = 1) and (p3 = 1.1)", filter.orElse(null)); + assertThat(filter.orElse(null)).isEqualTo("(p1 = 1) and (p3 = 1.1)"); filter = HiveTableUtil.makePartitionFilter( @@ -82,6 +82,6 @@ public void testMakePartitionFilter() { Arrays.asList(p1Exp, p3Exp), DataTypes.BOOLEAN())), hiveShim); - assertEquals("(p2 = 'a') and ((p1 = 1) or (p3 = 1.1))", filter.orElse(null)); + assertThat(filter.orElse(null)).isEqualTo("(p2 = 'a') and ((p1 = 1) or (p3 = 1.1))"); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDAFTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDAFTest.java index 670651962ff59..d775f72e3e3ae 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDAFTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDAFTest.java @@ -32,7 +32,7 @@ import java.math.BigDecimal; import java.util.Arrays; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link HiveGenericUDAF}. 
*/ public class HiveGenericUDAFTest { @@ -52,7 +52,7 @@ public void testUDAFMin() throws Exception { udf.merge(acc, Arrays.asList()); - assertEquals(1L, udf.getValue(acc)); + assertThat(udf.getValue(acc)).isEqualTo(1L); } @Test @@ -71,7 +71,7 @@ public void testUDAFSum() throws Exception { udf.merge(acc, Arrays.asList()); - assertEquals(6.1d, udf.getValue(acc)); + assertThat(udf.getValue(acc)).isEqualTo(6.1d); constantArgs = new Object[] {null}; @@ -87,7 +87,7 @@ public void testUDAFSum() throws Exception { udf.merge(acc, Arrays.asList()); - assertEquals(BigDecimal.valueOf(18.666), udf.getValue(acc)); + assertThat(udf.getValue(acc)).isEqualTo(BigDecimal.valueOf(18.666)); } @Test @@ -106,7 +106,7 @@ public void testUDAFCount() throws Exception { udf.merge(acc, Arrays.asList()); - assertEquals(3L, udf.getValue(acc)); + assertThat(udf.getValue(acc)).isEqualTo(3L); } private static HiveGenericUDAF init( diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java index 4f836941d862a..5cca17f793639 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java @@ -50,9 +50,9 @@ import static org.apache.flink.table.HiveVersionTestUtil.HIVE_230_OR_LATER; import static org.apache.flink.table.HiveVersionTestUtil.HIVE_310_OR_LATER; -import static org.hamcrest.MatcherAssert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.junit.Assert.assertEquals; /** Test for {@link HiveGenericUDF}. 
*/ public class HiveGenericUDFTest { @@ -63,15 +63,15 @@ public void testAbs() { HiveGenericUDF udf = init(GenericUDFAbs.class, new Object[] {null}, new DataType[] {DataTypes.DOUBLE()}); - assertEquals(10.0d, udf.eval(-10.0d)); + assertThat(udf.eval(-10.0d)).isEqualTo(10.0d); udf = init(GenericUDFAbs.class, new Object[] {null}, new DataType[] {DataTypes.INT()}); - assertEquals(10, udf.eval(-10)); + assertThat(udf.eval(-10)).isEqualTo(10); udf = init(GenericUDFAbs.class, new Object[] {null}, new DataType[] {DataTypes.STRING()}); - assertEquals(10.0, udf.eval("-10.0")); + assertThat(udf.eval("-10.0")).isEqualTo(10.0); } @Test @@ -83,8 +83,8 @@ public void testAddMonths() throws Exception { new Object[] {null, 1}, new DataType[] {DataTypes.STRING(), DataTypes.INT()}); - assertEquals("2009-09-30", udf.eval("2009-08-31", 1)); - assertEquals("2009-09-30", udf.eval("2009-08-31 11:11:11", 1)); + assertThat(udf.eval("2009-08-31", 1)).isEqualTo("2009-09-30"); + assertThat(udf.eval("2009-08-31 11:11:11", 1)).isEqualTo("2009-09-30"); } @Test @@ -99,7 +99,7 @@ public void testDateFormat() throws Exception { new Object[] {null, constYear}, new DataType[] {DataTypes.STRING(), DataTypes.STRING()}); - assertEquals("2009", udf.eval("2009-08-31", constYear)); + assertThat(udf.eval("2009-08-31", constYear)).isEqualTo("2009"); udf = init( @@ -107,7 +107,7 @@ public void testDateFormat() throws Exception { new Object[] {null, constMonth}, new DataType[] {DataTypes.DATE(), DataTypes.STRING()}); - assertEquals("8", udf.eval(Date.valueOf("2019-08-31"), constMonth)); + assertThat(udf.eval(Date.valueOf("2019-08-31"), constMonth)).isEqualTo("8"); } @Test @@ -123,7 +123,7 @@ public void testDecode() { HiveSimpleUDF simpleUDF = HiveSimpleUDFTest.init(UDFUnhex.class, new DataType[] {DataTypes.STRING()}); - assertEquals("MySQL", udf.eval(simpleUDF.eval("4D7953514C"), constDecoding)); + assertThat(udf.eval(simpleUDF.eval("4D7953514C"), constDecoding)).isEqualTo("MySQL"); } @Test @@ -139,8 +139,8 @@ public void testCase() { DataTypes.STRING() }); - assertEquals("a", udf.eval("1", "1", "a", "b")); - assertEquals("b", udf.eval("2", "1", "a", "b")); + assertThat(udf.eval("1", "1", "a", "b")).isEqualTo("a"); + assertThat(udf.eval("2", "1", "a", "b")).isEqualTo("b"); } @Test @@ -151,7 +151,7 @@ public void testCeil() { new Object[] {null}, new DataType[] {DataTypes.DOUBLE()}); - assertEquals(0L, udf.eval(-0.1d)); + assertThat(udf.eval(-0.1d)).isEqualTo(0L); udf = init( @@ -159,7 +159,7 @@ public void testCeil() { new Object[] {null}, new DataType[] {DataTypes.DECIMAL(2, 1)}); - assertEquals(BigDecimal.valueOf(4), udf.eval(BigDecimal.valueOf(3.1d))); + assertThat(udf.eval(BigDecimal.valueOf(3.1d))).isEqualTo(BigDecimal.valueOf(4)); } @Test @@ -170,7 +170,7 @@ public void testCoalesce() { new Object[] {null, 1, null}, new DataType[] {DataTypes.INT(), DataTypes.INT(), DataTypes.INT()}); - assertEquals(1, udf.eval(null, 1, null)); + assertThat(udf.eval(null, 1, null)).isEqualTo(1); } @Test @@ -189,7 +189,7 @@ public void testDateDiff() DataTypes.VARCHAR(20), DataTypes.CHAR(20), }); - assertEquals(-4182, udf.eval(t1, t2)); + assertThat(udf.eval(t1, t2)).isEqualTo(-4182); udf = init( @@ -199,7 +199,7 @@ public void testDateDiff() DataTypes.DATE(), DataTypes.TIMESTAMP(), }); - assertEquals(-4182, udf.eval(Date.valueOf(d), Timestamp.valueOf(t2))); + assertThat(udf.eval(Date.valueOf(d), Timestamp.valueOf(t2))).isEqualTo(-4182); // Test invalid char length udf = @@ -210,7 +210,7 @@ public void testDateDiff() DataTypes.CHAR(2), 
DataTypes.VARCHAR(2), }); - assertEquals(null, udf.eval(t1, t2)); + assertThat(udf.eval(t1, t2)).isNull(); } @Test @@ -221,8 +221,8 @@ public void testArray() { new Object[] {null}, new DataType[] {DataTypes.ARRAY(DataTypes.INT())}); - assertEquals(6, udf.eval(1, 2, 3)); - assertEquals(6, udf.eval(new Integer[] {1, 2, 3})); + assertThat(udf.eval(1, 2, 3)).isEqualTo(6); + assertThat(udf.eval(new Integer[] {1, 2, 3})).isEqualTo(6); } @Test @@ -236,15 +236,16 @@ public void testMap() { new Object[] {null}, new DataType[] {DataTypes.VARCHAR(testInput.length())}); - assertEquals( - new HashMap() { - { - put("1", "1"); - put("2", "2"); - put("3", "3"); - } - }, - udf.eval(testInput)); + assertThat(udf.eval(testInput)) + .isEqualTo( + new HashMap() { + + { + put("1", "1"); + put("2", "2"); + put("3", "3"); + } + }); // test input as map and nested functions HiveGenericUDF udf2 = @@ -255,8 +256,8 @@ public void testMap() { Object[] result = (Object[]) udf2.eval(udf.eval(testInput)); - assertEquals(3, result.length); - assertThat(Arrays.asList(result), containsInAnyOrder("1", "2", "3")); + assertThat(result).hasSize(3); + assertThat(Arrays.asList(result)).satisfies(matching(containsInAnyOrder("1", "2", "3"))); } @Test @@ -269,7 +270,7 @@ public void testStruct() { Row result = (Row) udf.eval(1, "222", "3"); - assertEquals(Row.of(1, "22", "3"), result); + assertThat(result).isEqualTo(Row.of(1, "22", "3")); udf = init( @@ -282,7 +283,7 @@ public void testStruct() { DataTypes.FIELD("3", DataTypes.VARCHAR(10))) }); - assertEquals(3, udf.eval(result)); + assertThat(udf.eval(result)).isEqualTo(3); } private static HiveGenericUDF init( diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDTFTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDTFTest.java index bfcf791918414..2819fadff7634 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDTFTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDTFTest.java @@ -44,7 +44,7 @@ import java.util.Collections; import java.util.List; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link HiveGenericUDTF}. 
*/ public class HiveGenericUDTFTest { @@ -62,7 +62,7 @@ public void testOverSumInt() throws Exception { udf.eval(5, 4); - assertEquals(Arrays.asList(Row.of(9), Row.of(9)), collector.result); + assertThat(collector.result).isEqualTo(Arrays.asList(Row.of(9), Row.of(9))); // Test empty input and empty output constantArgs = new Object[] {}; @@ -73,7 +73,7 @@ public void testOverSumInt() throws Exception { udf.eval(); - assertEquals(Arrays.asList(), collector.result); + assertThat(collector.result).isEqualTo(Arrays.asList()); } @Test @@ -86,9 +86,8 @@ public void testSplit() throws Exception { udf.eval("1,2,3,5"); - assertEquals( - Arrays.asList(Row.of("1"), Row.of("2"), Row.of("3"), Row.of("5")), - collector.result); + assertThat(collector.result) + .isEqualTo(Arrays.asList(Row.of("1"), Row.of("2"), Row.of("3"), Row.of("5"))); } @Test @@ -108,7 +107,7 @@ public void testStack() throws Exception { udf.eval(2, "a", "b", "c", "d"); - assertEquals(Arrays.asList(Row.of("a", "b"), Row.of("c", "d")), collector.result); + assertThat(collector.result).isEqualTo(Arrays.asList(Row.of("a", "b"), Row.of("c", "d"))); } @Test @@ -121,7 +120,8 @@ public void testArray() throws Exception { udf.eval(new Integer[] {1, 2, 3}); - assertEquals(Arrays.asList(Row.of(0, 1), Row.of(1, 2), Row.of(2, 3)), collector.result); + assertThat(collector.result) + .isEqualTo(Arrays.asList(Row.of(0, 1), Row.of(1, 2), Row.of(2, 3))); } @Test @@ -140,7 +140,7 @@ public void testStruct() throws Exception { udf.eval(new Row[] {Row.of(1, 2.2d), Row.of(3, 4.4d)}); - assertEquals(Arrays.asList(Row.of(1, 2.2), Row.of(3, 4.4)), collector.result); + assertThat(collector.result).isEqualTo(Arrays.asList(Row.of(1, 2.2), Row.of(3, 4.4))); } private static HiveGenericUDTF init( diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveSimpleUDFTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveSimpleUDFTest.java index 7ab0e5ba6a1d9..a0d74203673e2 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveSimpleUDFTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveSimpleUDFTest.java @@ -47,8 +47,7 @@ import java.util.List; import java.util.Optional; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link HiveSimpleUDF}. 
*/ public class HiveSimpleUDFTest { @@ -57,25 +56,25 @@ public class HiveSimpleUDFTest { @Test public void testBooleanUDF() { HiveSimpleUDF udf = init(BooleanUDF.class, new DataType[] {DataTypes.INT()}); - assertTrue((boolean) udf.eval(1)); + assertThat((boolean) udf.eval(1)).isTrue(); } @Test public void testFloatUDF() { HiveSimpleUDF udf = init(FloatUDF.class, new DataType[] {DataTypes.FLOAT()}); - assertEquals(3.0f, (float) udf.eval(3.0f), 0); + assertThat((float) udf.eval(3.0f)).isEqualTo(3.0f); } @Test public void testIntUDF() { HiveSimpleUDF udf = init(IntUDF.class, new DataType[] {DataTypes.INT()}); - assertEquals(3, (int) udf.eval(3)); + assertThat((int) udf.eval(3)).isEqualTo(3); } @Test public void testStringUDF() { HiveSimpleUDF udf = init(StringUDF.class, new DataType[] {DataTypes.STRING()}); - assertEquals("test", udf.eval("test")); + assertThat(udf.eval("test")).isEqualTo("test"); } @Test @@ -84,14 +83,14 @@ public void testUDFRand() { double result = (double) udf.eval(); - assertTrue(result >= 0 && result < 1); + assertThat(result >= 0 && result < 1).isTrue(); } @Test public void testUDFBin() { HiveSimpleUDF udf = init(UDFBin.class, new DataType[] {DataTypes.INT()}); - assertEquals("1100", udf.eval(12)); + assertThat(udf.eval(12)).isEqualTo("1100"); } @Test @@ -101,8 +100,8 @@ public void testUDFConv() { UDFConv.class, new DataType[] {DataTypes.STRING(), DataTypes.INT(), DataTypes.INT()}); - assertEquals("1", udf.eval("12", 2, 10)); - assertEquals("-16", udf.eval(-10, 16, -10)); + assertThat(udf.eval("12", 2, 10)).isEqualTo("1"); + assertThat(udf.eval(-10, 16, -10)).isEqualTo("-16"); } @Test @@ -114,14 +113,14 @@ public void testUDFJson() { HiveSimpleUDF udf = init(UDFJson.class, new DataType[] {DataTypes.STRING(), DataTypes.STRING()}); - assertEquals(expected, udf.eval(json, pattern)); + assertThat(udf.eval(json, pattern)).isEqualTo(expected); udf = init( UDFJson.class, new DataType[] {DataTypes.CHAR(100), DataTypes.CHAR(pattern.length())}); - assertEquals(expected, udf.eval(json, pattern)); + assertThat(udf.eval(json, pattern)).isEqualTo(expected); udf = init( @@ -130,7 +129,7 @@ public void testUDFJson() { DataTypes.VARCHAR(100), DataTypes.VARCHAR(pattern.length()) }); - assertEquals(expected, udf.eval(json, pattern)); + assertThat(udf.eval(json, pattern)).isEqualTo(expected); // Test invalid CHAR length udf = @@ -143,17 +142,17 @@ public void testUDFJson() { }); // Cannot find path "$.owne" - assertEquals(null, udf.eval(json, pattern)); + assertThat(udf.eval(json, pattern)).isNull(); } @Test public void testUDFWeekOfYear() throws FlinkHiveUDFException { HiveSimpleUDF udf = init(UDFWeekOfYear.class, new DataType[] {DataTypes.STRING()}); - assertEquals(29, udf.eval("1969-07-20")); - assertEquals(29, udf.eval(Date.valueOf("1969-07-20"))); - assertEquals(29, udf.eval(Timestamp.valueOf("1969-07-20 00:00:00"))); - assertEquals(1, udf.eval("1980-12-31 12:59:59")); + assertThat(udf.eval("1969-07-20")).isEqualTo(29); + assertThat(udf.eval(Date.valueOf("1969-07-20"))).isEqualTo(29); + assertThat(udf.eval(Timestamp.valueOf("1969-07-20 00:00:00"))).isEqualTo(29); + assertThat(udf.eval("1980-12-31 12:59:59")).isEqualTo(1); } @Test @@ -163,28 +162,28 @@ public void testUDFRegExpExtract() { UDFRegExpExtract.class, new DataType[] {DataTypes.STRING(), DataTypes.STRING(), DataTypes.INT()}); - assertEquals("100", udf.eval("100-200", "(\\d+)-(\\d+)", 1)); + assertThat(udf.eval("100-200", "(\\d+)-(\\d+)", 1)).isEqualTo("100"); } @Test public void testUDFUnbase64() { HiveSimpleUDF udf = 
init(UDFBase64.class, new DataType[] {DataTypes.BYTES()}); - assertEquals("Cg==", udf.eval(new byte[] {10})); + assertThat(udf.eval(new byte[] {10})).isEqualTo("Cg=="); } @Test public void testUDFUnhex() throws UnsupportedEncodingException { HiveSimpleUDF udf = init(UDFUnhex.class, new DataType[] {DataTypes.STRING()}); - assertEquals("MySQL", new String((byte[]) udf.eval("4D7953514C"), "UTF-8")); + assertThat(new String((byte[]) udf.eval("4D7953514C"), "UTF-8")).isEqualTo("MySQL"); } @Test public void testUDFToInteger() { HiveSimpleUDF udf = init(UDFToInteger.class, new DataType[] {DataTypes.DECIMAL(5, 3)}); - assertEquals(1, udf.eval(BigDecimal.valueOf(1.1d))); + assertThat(udf.eval(BigDecimal.valueOf(1.1d))).isEqualTo(1); } @Test @@ -195,8 +194,8 @@ public void testUDFArray_singleArray() { HiveSimpleUDF udf = init(TestHiveUDFArray.class, new DataType[] {DataTypes.ARRAY(DataTypes.DOUBLE())}); - assertEquals(3, udf.eval(1.1d, 2.2d)); - assertEquals(3, udf.eval(testInputs)); + assertThat(udf.eval(1.1d, 2.2d)).isEqualTo(3); + assertThat(udf.eval(testInputs)).isEqualTo(3); // input is not a single array udf = @@ -204,7 +203,7 @@ public void testUDFArray_singleArray() { TestHiveUDFArray.class, new DataType[] {DataTypes.INT(), DataTypes.ARRAY(DataTypes.DOUBLE())}); - assertEquals(8, udf.eval(5, testInputs)); + assertThat(udf.eval(5, testInputs)).isEqualTo(8); udf = init( @@ -215,7 +214,7 @@ public void testUDFArray_singleArray() { DataTypes.ARRAY(DataTypes.DOUBLE()) }); - assertEquals(11, udf.eval(5, testInputs, testInputs)); + assertThat(udf.eval(5, testInputs, testInputs)).isEqualTo(11); } protected static HiveSimpleUDF init(Class hiveUdfClass, DataType[] argTypes) { diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleFactoryTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleFactoryTest.java index 0c0129c62e1fb..f708000a365ab 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleFactoryTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleFactoryTest.java @@ -25,7 +25,7 @@ import java.util.Collections; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link HiveModuleFactory}. 
*/ public class HiveModuleFactoryTest { @@ -44,6 +44,6 @@ public void test() { } private static void checkEquals(HiveModule m1, HiveModule m2) { - assertEquals(m1.getHiveVersion(), m2.getHiveVersion()); + assertThat(m2.getHiveVersion()).isEqualTo(m1.getHiveVersion()); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleTest.java index fe5b87d91c3dc..6567c22702ded 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/module/hive/HiveModuleTest.java @@ -38,9 +38,8 @@ import static org.apache.flink.table.catalog.hive.client.HiveShimLoader.HIVE_VERSION_V2_3_9; import static org.apache.flink.table.catalog.hive.client.HiveShimLoader.HIVE_VERSION_V3_1_1; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Test for {@link HiveModule}. */ public class HiveModuleTest { @@ -65,19 +64,19 @@ public void testNumberOfBuiltinFunctions() { verifyNumBuiltInFunctions(hiveVersion, hiveModule); // explicitly verify that HiveModule doesn't consider the created functions as built-in // functions - assertFalse(hiveModule.getFunctionDefinition("myudf").isPresent()); - assertFalse(hiveModule.getFunctionDefinition("mygenericudf").isPresent()); - assertFalse(hiveModule.getFunctionDefinition("myudaf").isPresent()); - assertFalse(hiveModule.getFunctionDefinition("myudtf").isPresent()); + assertThat(hiveModule.getFunctionDefinition("myudf")).isNotPresent(); + assertThat(hiveModule.getFunctionDefinition("mygenericudf")).isNotPresent(); + assertThat(hiveModule.getFunctionDefinition("myudaf")).isNotPresent(); + assertThat(hiveModule.getFunctionDefinition("myudtf")).isNotPresent(); } private void verifyNumBuiltInFunctions(String hiveVersion, HiveModule hiveModule) { switch (hiveVersion) { case HIVE_VERSION_V2_3_9: - assertEquals(275, hiveModule.listFunctions().size()); + assertThat(hiveModule.listFunctions()).hasSize(275); break; case HIVE_VERSION_V3_1_1: - assertEquals(294, hiveModule.listFunctions().size()); + assertThat(hiveModule.listFunctions()).hasSize(294); break; default: fail("Unknown test version " + hiveVersion); @@ -96,12 +95,12 @@ public void testHiveBuiltInFunction() { udf.open(null); - assertEquals("cba", udf.eval("abc")); + assertThat(udf.eval("abc")).isEqualTo("cba"); } @Test public void testNonExistFunction() { - assertFalse(new HiveModule().getFunctionDefinition("nonexist").isPresent()); + assertThat(new HiveModule().getFunctionDefinition("nonexist")).isNotPresent(); } @Test @@ -114,21 +113,21 @@ public void testConstantArguments() { List results = CollectionUtil.iteratorToList( tEnv.sqlQuery("select concat('an', 'bn')").execute().collect()); - assertEquals("[anbn]", results.toString()); + assertThat(results.toString()).isEqualTo("[anbn]"); results = CollectionUtil.iteratorToList( tEnv.sqlQuery("select concat('ab', cast('cdefghi' as varchar(5)))") .execute() .collect()); - assertEquals("[abcdefg]", results.toString()); + assertThat(results.toString()).isEqualTo("[abcdefg]"); results = CollectionUtil.iteratorToList( tEnv.sqlQuery("select concat('ab',cast(12.34 as decimal(10,5)))") .execute() .collect()); 
- assertEquals("[ab12.34]", results.toString()); + assertThat(results.toString()).isEqualTo("[ab12.34]"); results = CollectionUtil.iteratorToList( @@ -136,14 +135,14 @@ public void testConstantArguments() { "select concat(cast('2018-01-19' as date),cast('2019-12-27 17:58:23.385' as timestamp))") .execute() .collect()); - assertEquals("[2018-01-192019-12-27 17:58:23.385]", results.toString()); + assertThat(results.toString()).isEqualTo("[2018-01-192019-12-27 17:58:23.385]"); // TODO: null cannot be a constant argument at the moment. This test will make more sense // when that changes. results = CollectionUtil.iteratorToList( tEnv.sqlQuery("select concat('ab',cast(null as int))").execute().collect()); - assertEquals("[null]", results.toString()); + assertThat(results.toString()).isEqualTo("[null]"); } @Test @@ -157,15 +156,16 @@ public void testDecimalReturnType() { CollectionUtil.iteratorToList( tEnv.sqlQuery("select negative(5.1)").execute().collect()); - assertEquals("[-5.1]", results.toString()); + assertThat(results.toString()).isEqualTo("[-5.1]"); } @Test public void testBlackList() { HiveModule hiveModule = new HiveModule(); - assertFalse(hiveModule.listFunctions().removeAll(HiveModule.BUILT_IN_FUNC_BLACKLIST)); + assertThat(hiveModule.listFunctions().removeAll(HiveModule.BUILT_IN_FUNC_BLACKLIST)) + .isFalse(); for (String banned : HiveModule.BUILT_IN_FUNC_BLACKLIST) { - assertFalse(hiveModule.getFunctionDefinition(banned).isPresent()); + assertThat(hiveModule.getFunctionDefinition(banned)).isNotPresent(); } } @@ -182,7 +182,7 @@ public void testConstantReturnValue() { .execute() .collect()); - assertEquals("[{a=1, b=2, c=3}]", results.toString()); + assertThat(results.toString()).isEqualTo("[{a=1, b=2, c=3}]"); } @Test @@ -198,13 +198,13 @@ public void testEmptyStringLiteralParameters() { tableEnv.sqlQuery("select regexp_replace('foobar','oo|ar','')") .execute() .collect()); - assertEquals("[fb]", results.toString()); + assertThat(results.toString()).isEqualTo("[fb]"); // GenericUDF results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select length('')").execute().collect()); - assertEquals("[0]", results.toString()); + assertThat(results.toString()).isEqualTo("[0]"); } @Test @@ -220,7 +220,7 @@ public void testFunctionsNeedSessionState() { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select mod(-1,2),pmod(-1,2)").execute().collect()); - assertEquals("[-1,1]", results.toString()); + assertThat(results.toString()).isEqualTo("[-1,1]"); } @Test @@ -234,6 +234,6 @@ public void testCallUDFWithNoParam() { List results = CollectionUtil.iteratorToList( tableEnv.sqlQuery("select `array`(),`map`()").execute().collect()); - assertEquals("[[],{}]", results.toString()); + assertThat(results.toString()).isEqualTo("[[],{}]"); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/planner/delegation/hive/HiveASTParserTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/planner/delegation/hive/HiveASTParserTest.java index a2be080b9455f..db832bf4b8148 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/planner/delegation/hive/HiveASTParserTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/planner/delegation/hive/HiveASTParserTest.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.junit.Test; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** 
Tests for the AST parser. */ public class HiveASTParserTest { @@ -151,7 +151,7 @@ public void testConstraints() throws Exception { private void assertDDLType(int type, String... sqls) throws Exception { for (String sql : sqls) { HiveParserContext parserContext = new HiveParserContext(hiveConf); - assertEquals(type, HiveASTParseUtils.parse(sql, parserContext).getType()); + assertThat(HiveASTParseUtils.parse(sql, parserContext).getType()).isEqualTo(type); } } } From b7fa36d1f7b3e284e303f622b871933b5627eead Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 6 May 2022 11:10:22 +0200 Subject: [PATCH 05/16] [FLINK-27185][connector] Convert connector-jdbc module to assertj Co-authored-by: slinkydeveloper --- .../connector/jdbc/JdbcDataTypeTest.java | 14 +- .../flink/connector/jdbc/JdbcITCase.java | 6 +- .../connector/jdbc/JdbcInputFormatTest.java | 38 +- .../jdbc/JdbcRowOutputFormatTest.java | 75 ++-- .../jdbc/catalog/MySqlCatalogITCase.java | 46 +-- .../jdbc/catalog/PostgresCatalogITCase.java | 86 ++-- .../jdbc/catalog/PostgresCatalogTest.java | 57 ++- .../jdbc/catalog/PostgresTablePathTest.java | 7 +- .../factory/JdbcCatalogFactoryTest.java | 15 +- .../AbstractJdbcRowConverterTest.java | 9 +- .../oracle/OraclePreparedStatementTest.java | 55 +-- .../oracle/OracleTableSourceITCase.java | 15 +- .../connector/jdbc/internal/JdbcFullTest.java | 9 +- .../internal/JdbcTableOutputFormatTest.java | 13 +- ...derDriverClassConcurrentLoadingITCase.java | 11 +- .../SimpleJdbcConnectionProviderTest.java | 118 +++--- .../NumericBetweenParametersProviderTest.java | 6 +- .../FieldNamedPreparedStatementImplTest.java | 55 ++- .../table/JdbcDynamicTableFactoryTest.java | 83 ++-- .../table/JdbcDynamicTableSourceITCase.java | 15 +- .../jdbc/table/JdbcOutputFormatTest.java | 376 +++++++++--------- .../table/JdbcRowDataInputFormatTest.java | 40 +- .../table/JdbcRowDataLookupFunctionTest.java | 12 +- .../jdbc/utils/JdbcTypeUtilTest.java | 13 +- .../jdbc/xa/JdbcExactlyOnceSinkE2eTest.java | 8 +- .../jdbc/xa/JdbcXaFacadeImplTest.java | 6 +- .../jdbc/xa/JdbcXaFacadeTestHelper.java | 6 +- .../jdbc/xa/JdbcXaSinkDerbyTest.java | 6 +- .../connector/jdbc/xa/JdbcXaSinkH2Test.java | 25 +- .../jdbc/xa/JdbcXaSinkNoInsertionTest.java | 20 +- .../jdbc/xa/SemanticXidGeneratorTest.java | 4 +- .../flink/connector/jdbc/xa/XidImplTest.java | 33 +- 32 files changed, 629 insertions(+), 653 deletions(-) diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java index b19a7f8a39b5d..6dc47d27c5fb7 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java @@ -22,7 +22,6 @@ import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; -import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -32,7 +31,8 @@ import java.util.Arrays; import java.util.List; -import static org.junit.jupiter.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests for all DataTypes and Dialects of JDBC connector. 
*/ @RunWith(Parameterized.class) @@ -170,7 +170,7 @@ public static List testData() { } private static TestItem createTestItem(Object... args) { - assert args.length >= 2; + assertThat(args.length).isGreaterThanOrEqualTo(2); TestItem item = TestItem.fromDialectAndType((String) args[0], (String) args[1]); if (args.length == 3) { item.withExpectError((String) args[2]); @@ -192,13 +192,13 @@ public void testDataTypeValidate() { if (testItem.expectError != null) { try { tEnv.sqlQuery("SELECT * FROM T"); - fail(); + fail("unknown failure"); } catch (ValidationException ex) { - Assert.assertEquals(testItem.expectError, ex.getCause().getMessage()); + assertThat(ex.getCause()).hasMessage(testItem.expectError); } catch (UnsupportedOperationException ex) { - Assert.assertEquals(testItem.expectError, ex.getMessage()); + assertThat(ex).hasMessage(testItem.expectError); } catch (Exception e) { - fail(e); + fail("unknown failure", e); } } else { tEnv.sqlQuery("SELECT * FROM T"); diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcITCase.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcITCase.java index 0b68f700c01f9..c24fc561326eb 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcITCase.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcITCase.java @@ -44,7 +44,7 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TestEntry; import static org.apache.flink.util.Preconditions.checkNotNull; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Smoke tests for the {@link JdbcSink} and the underlying classes. */ public class JdbcITCase extends JdbcTestBase { @@ -78,7 +78,7 @@ public void testInsert() throws Exception { .build())); env.execute(); - assertEquals(Arrays.asList(TEST_DATA), selectBooks()); + assertThat(selectBooks()).isEqualTo(Arrays.asList(TEST_DATA)); } @Test @@ -112,7 +112,7 @@ public void testObjectReuse() throws Exception { .build())); env.execute(); - assertEquals(Arrays.asList(words), selectWords()); + assertThat(selectWords()).isEqualTo(Arrays.asList(words)); } private List selectWords() throws SQLException { diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcInputFormatTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcInputFormatTest.java index 6aac58951c72e..856e6f4a4b4d7 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcInputFormatTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcInputFormatTest.java @@ -25,7 +25,6 @@ import org.apache.flink.types.Row; import org.junit.After; -import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -44,6 +43,7 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.SELECT_EMPTY; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TestEntry; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for the {@link JdbcInputFormat}. 
*/ public class JdbcInputFormatTest extends JdbcDataTestBase { @@ -176,7 +176,7 @@ public void testDefaultFetchSizeIsUsedIfNotConfiguredOtherwise() .createStatement() .getFetchSize(); - Assert.assertEquals(defaultFetchSize, jdbcInputFormat.getStatement().getFetchSize()); + assertThat(jdbcInputFormat.getStatement().getFetchSize()).isEqualTo(defaultFetchSize); } @Test @@ -191,7 +191,7 @@ public void testFetchSizeCanBeConfigured() throws SQLException { .setFetchSize(desiredFetchSize) .finish(); jdbcInputFormat.openInputFormat(); - Assert.assertEquals(desiredFetchSize, jdbcInputFormat.getStatement().getFetchSize()); + assertThat(jdbcInputFormat.getStatement().getFetchSize()).isEqualTo(desiredFetchSize); } @Test @@ -211,7 +211,7 @@ public void testDefaultAutoCommitIsUsedIfNotConfiguredOtherwise() final boolean defaultAutoCommit = DriverManager.getConnection(DERBY_EBOOKSHOP_DB.getUrl()).getAutoCommit(); - Assert.assertEquals(defaultAutoCommit, jdbcInputFormat.getDbConn().getAutoCommit()); + assertThat(jdbcInputFormat.getDbConn().getAutoCommit()).isEqualTo(defaultAutoCommit); } @Test @@ -228,7 +228,7 @@ public void testAutoCommitCanBeConfigured() throws SQLException { .finish(); jdbcInputFormat.openInputFormat(); - Assert.assertEquals(desiredAutoCommit, jdbcInputFormat.getDbConn().getAutoCommit()); + assertThat(jdbcInputFormat.getDbConn().getAutoCommit()).isEqualTo(desiredAutoCommit); } @Test @@ -242,7 +242,7 @@ public void testJdbcInputFormatWithoutParallelism() throws IOException { .setResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE) .finish(); // this query does not exploit parallelism - Assert.assertEquals(1, jdbcInputFormat.createInputSplits(1).length); + assertThat(jdbcInputFormat.createInputSplits(1)).hasSize(1); jdbcInputFormat.openInputFormat(); jdbcInputFormat.open(null); Row row = new Row(5); @@ -256,7 +256,7 @@ public void testJdbcInputFormatWithoutParallelism() throws IOException { } jdbcInputFormat.close(); jdbcInputFormat.closeInputFormat(); - Assert.assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } @Test @@ -279,7 +279,7 @@ public void testJdbcInputFormatWithParallelismAndNumericColumnSplitting() throws jdbcInputFormat.openInputFormat(); InputSplit[] splits = jdbcInputFormat.createInputSplits(1); // this query exploit parallelism (1 split for every id) - Assert.assertEquals(TEST_DATA.length, splits.length); + assertThat(splits).hasSize(TEST_DATA.length); int recordCount = 0; Row row = new Row(5); for (InputSplit split : splits) { @@ -294,7 +294,7 @@ public void testJdbcInputFormatWithParallelismAndNumericColumnSplitting() throws jdbcInputFormat.close(); } jdbcInputFormat.closeInputFormat(); - Assert.assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } @Test @@ -318,7 +318,7 @@ public void testJdbcInputFormatWithoutParallelismAndNumericColumnSplitting() jdbcInputFormat.openInputFormat(); InputSplit[] splits = jdbcInputFormat.createInputSplits(1); // assert that a single split was generated - Assert.assertEquals(1, splits.length); + assertThat(splits).hasSize(1); int recordCount = 0; Row row = new Row(5); for (InputSplit split : splits) { @@ -333,7 +333,7 @@ public void testJdbcInputFormatWithoutParallelismAndNumericColumnSplitting() jdbcInputFormat.close(); } jdbcInputFormat.closeInputFormat(); - Assert.assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } @Test @@ -356,7 +356,7 @@ public void 
testJdbcInputFormatWithParallelismAndGenericSplitting() throws IOExc jdbcInputFormat.openInputFormat(); InputSplit[] splits = jdbcInputFormat.createInputSplits(1); // this query exploit parallelism (1 split for every queryParameters row) - Assert.assertEquals(queryParameters.length, splits.length); + assertThat(splits).hasSize(queryParameters.length); verifySplit(splits[0], TEST_DATA[3].id); verifySplit(splits[1], TEST_DATA[0].id + TEST_DATA[1].id); @@ -379,7 +379,7 @@ private void verifySplit(InputSplit split, int expectedIDSum) throws IOException sum += id; } - Assert.assertEquals(expectedIDSum, sum); + assertThat(sum).isEqualTo(expectedIDSum); } @Test @@ -395,7 +395,7 @@ public void testEmptyResults() throws IOException { try { jdbcInputFormat.openInputFormat(); jdbcInputFormat.open(null); - Assert.assertTrue(jdbcInputFormat.reachedEnd()); + assertThat(jdbcInputFormat.reachedEnd()).isTrue(); } finally { jdbcInputFormat.close(); jdbcInputFormat.closeInputFormat(); @@ -403,10 +403,10 @@ public void testEmptyResults() throws IOException { } private static void assertEquals(TestEntry expected, Row actual) { - Assert.assertEquals(expected.id, actual.getField(0)); - Assert.assertEquals(expected.title, actual.getField(1)); - Assert.assertEquals(expected.author, actual.getField(2)); - Assert.assertEquals(expected.price, actual.getField(3)); - Assert.assertEquals(expected.qty, actual.getField(4)); + assertThat(actual.getField(0)).isEqualTo(expected.id); + assertThat(actual.getField(1)).isEqualTo(expected.title); + assertThat(actual.getField(2)).isEqualTo(expected.author); + assertThat(actual.getField(3)).isEqualTo(expected.price); + assertThat(actual.getField(4)).isEqualTo(expected.qty); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java index b08537f7e126b..13142b93b58f7 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java @@ -46,10 +46,8 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.TestEntry; import static org.apache.flink.util.ExceptionUtils.findThrowable; import static org.apache.flink.util.ExceptionUtils.findThrowableWithMessage; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests for the {@link JdbcRowOutputFormat}. 
*/ public class JdbcRowOutputFormatTest extends JdbcDataTestBase { @@ -76,8 +74,8 @@ public void testInvalidDriver() { .finish(); jdbcOutputFormat.open(0, 1); } catch (Exception e) { - assertTrue(findThrowable(e, IOException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, IOException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -94,8 +92,8 @@ public void testInvalidURL() { jdbcOutputFormat.open(0, 1); fail("expect exception"); } catch (Exception e) { - assertTrue(findThrowable(e, IOException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, IOException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -112,8 +110,8 @@ public void testInvalidQuery() { setRuntimeContext(jdbcOutputFormat, true); jdbcOutputFormat.open(0, 1); } catch (Exception e) { - assertTrue(findThrowable(e, IOException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, IOException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -127,8 +125,8 @@ public void testIncompleteConfiguration() { .setQuery(String.format(INSERT_TEMPLATE, INPUT_TABLE)) .finish(); } catch (Exception e) { - assertTrue(findThrowable(e, NullPointerException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, NullPointerException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -155,8 +153,8 @@ public void testIncompatibleTypes() { jdbcOutputFormat.writeRecord(row); jdbcOutputFormat.close(); } catch (Exception e) { - assertTrue(findThrowable(e, SQLDataException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, SQLDataException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -191,8 +189,8 @@ public void testExceptionOnInvalidType() { jdbcOutputFormat.writeRecord(row); jdbcOutputFormat.close(); } catch (Exception e) { - assertTrue(findThrowable(e, ClassCastException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, ClassCastException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -230,8 +228,8 @@ public void testExceptionOnClose() { jdbcOutputFormat.close(); } catch (Exception e) { - assertTrue(findThrowable(e, RuntimeException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, RuntimeException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -257,15 +255,15 @@ public void testJdbcOutputFormat() throws IOException, SQLException { ResultSet resultSet = statement.executeQuery()) { int recordCount = 0; while (resultSet.next()) { - assertEquals(TEST_DATA[recordCount].id, resultSet.getObject("id")); - assertEquals(TEST_DATA[recordCount].title, resultSet.getObject("title")); - assertEquals(TEST_DATA[recordCount].author, resultSet.getObject("author")); - assertEquals(TEST_DATA[recordCount].price, resultSet.getObject("price")); - assertEquals(TEST_DATA[recordCount].qty, resultSet.getObject("qty")); + 
assertThat(resultSet.getObject("id")).isEqualTo(TEST_DATA[recordCount].id); + assertThat(resultSet.getObject("title")).isEqualTo(TEST_DATA[recordCount].title); + assertThat(resultSet.getObject("author")).isEqualTo(TEST_DATA[recordCount].author); + assertThat(resultSet.getObject("price")).isEqualTo(TEST_DATA[recordCount].price); + assertThat(resultSet.getObject("qty")).isEqualTo(TEST_DATA[recordCount].qty); recordCount++; } - assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } } @@ -286,20 +284,23 @@ public void testFlush() throws SQLException, IOException { jdbcOutputFormat.writeRecord(toRow(TEST_DATA[i])); } try (ResultSet resultSet = statement.executeQuery()) { - assertFalse(resultSet.next()); + assertThat(resultSet.next()).isFalse(); } jdbcOutputFormat.writeRecord(toRow(TEST_DATA[2])); try (ResultSet resultSet = statement.executeQuery()) { int recordCount = 0; while (resultSet.next()) { - assertEquals(TEST_DATA[recordCount].id, resultSet.getObject("id")); - assertEquals(TEST_DATA[recordCount].title, resultSet.getObject("title")); - assertEquals(TEST_DATA[recordCount].author, resultSet.getObject("author")); - assertEquals(TEST_DATA[recordCount].price, resultSet.getObject("price")); - assertEquals(TEST_DATA[recordCount].qty, resultSet.getObject("qty")); + assertThat(resultSet.getObject("id")).isEqualTo(TEST_DATA[recordCount].id); + assertThat(resultSet.getObject("title")) + .isEqualTo(TEST_DATA[recordCount].title); + assertThat(resultSet.getObject("author")) + .isEqualTo(TEST_DATA[recordCount].author); + assertThat(resultSet.getObject("price")) + .isEqualTo(TEST_DATA[recordCount].price); + assertThat(resultSet.getObject("qty")).isEqualTo(TEST_DATA[recordCount].qty); recordCount++; } - assertEquals(3, recordCount); + assertThat(recordCount).isEqualTo(3); } } finally { jdbcOutputFormat.close(); @@ -336,15 +337,15 @@ public void testInvalidConnectionInJdbcOutputFormat() throws IOException, SQLExc ResultSet resultSet = statement.executeQuery()) { int recordCount = 0; while (resultSet.next()) { - assertEquals(TEST_DATA[recordCount].id, resultSet.getObject("id")); - assertEquals(TEST_DATA[recordCount].title, resultSet.getObject("title")); - assertEquals(TEST_DATA[recordCount].author, resultSet.getObject("author")); - assertEquals(TEST_DATA[recordCount].price, resultSet.getObject("price")); - assertEquals(TEST_DATA[recordCount].qty, resultSet.getObject("qty")); + assertThat(resultSet.getObject("id")).isEqualTo(TEST_DATA[recordCount].id); + assertThat(resultSet.getObject("title")).isEqualTo(TEST_DATA[recordCount].title); + assertThat(resultSet.getObject("author")).isEqualTo(TEST_DATA[recordCount].author); + assertThat(resultSet.getObject("price")).isEqualTo(TEST_DATA[recordCount].price); + assertThat(resultSet.getObject("qty")).isEqualTo(TEST_DATA[recordCount].qty); recordCount++; } - assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/MySqlCatalogITCase.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/MySqlCatalogITCase.java index 0e2c48656e3e1..b9720ad5e7644 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/MySqlCatalogITCase.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/MySqlCatalogITCase.java @@ -38,15 +38,12 @@ import 
java.sql.Time; import java.sql.Timestamp; import java.util.Arrays; -import java.util.Collections; import java.util.List; import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches; import static org.apache.flink.table.api.config.ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM; +import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; /** E2E test for {@link MySqlCatalog}. */ public class MySqlCatalogITCase extends MySqlCatalogTestBase { @@ -179,14 +176,14 @@ public void testGetDb_DatabaseNotExistException() throws Exception { @Test public void testListDatabases() { List actual = catalog.listDatabases(); - assertEquals(Collections.singletonList(TEST_DB), actual); + assertThat(actual).containsExactly(TEST_DB); } @Test public void testDbExists() throws Exception { String databaseNotExist = "nonexistent"; - assertFalse(catalog.databaseExists(databaseNotExist)); - assertTrue(catalog.databaseExists(TEST_DB)); + assertThat(catalog.databaseExists(databaseNotExist)).isFalse(); + assertThat(catalog.databaseExists(TEST_DB)).isTrue(); } // ------ tables ------ @@ -194,12 +191,12 @@ public void testDbExists() throws Exception { @Test public void testListTables() throws DatabaseNotExistException { List actual = catalog.listTables(TEST_DB); - assertEquals( - Arrays.asList( - TEST_TABLE_ALL_TYPES, - TEST_SINK_TABLE_ALL_TYPES, - TEST_TABLE_SINK_FROM_GROUPED_BY), - actual); + assertThat(actual) + .isEqualTo( + Arrays.asList( + TEST_TABLE_ALL_TYPES, + TEST_SINK_TABLE_ALL_TYPES, + TEST_TABLE_SINK_FROM_GROUPED_BY)); } @Test @@ -216,8 +213,8 @@ public void testListTables_DatabaseNotExistException() throws DatabaseNotExistEx @Test public void testTableExists() { String tableNotExist = "nonexist"; - assertFalse(catalog.tableExists(new ObjectPath(TEST_DB, tableNotExist))); - assertTrue(catalog.tableExists(new ObjectPath(TEST_DB, TEST_TABLE_ALL_TYPES))); + assertThat(catalog.tableExists(new ObjectPath(TEST_DB, tableNotExist))).isFalse(); + assertThat(catalog.tableExists(new ObjectPath(TEST_DB, TEST_TABLE_ALL_TYPES))).isTrue(); } @Test @@ -248,7 +245,7 @@ public void testGetTables_TableNotExistException_NoDb() throws TableNotExistExce @Test public void testGetTable() throws TableNotExistException { CatalogBaseTable table = catalog.getTable(new ObjectPath(TEST_DB, TEST_TABLE_ALL_TYPES)); - assertEquals(TABLE_SCHEMA, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(TABLE_SCHEMA); } // ------ test select query. 
------ @@ -260,9 +257,10 @@ public void testSelectField() { tEnv.sqlQuery(String.format("select pid from %s", TEST_TABLE_ALL_TYPES)) .execute() .collect()); - assertEquals( - Lists.newArrayList(Row.ofKind(RowKind.INSERT, 1L), Row.ofKind(RowKind.INSERT, 2L)), - results); + assertThat(results) + .isEqualTo( + Lists.newArrayList( + Row.ofKind(RowKind.INSERT, 1L), Row.ofKind(RowKind.INSERT, 2L))); } @Test @@ -273,7 +271,7 @@ public void testWithoutCatalogDB() { .execute() .collect()); - assertEquals(ALL_TYPES_ROWS, results); + assertThat(results).isEqualTo(ALL_TYPES_ROWS); } @Test @@ -286,7 +284,7 @@ public void testWithoutCatalog() { TEST_DB, TEST_TABLE_ALL_TYPES)) .execute() .collect()); - assertEquals(ALL_TYPES_ROWS, results); + assertThat(results).isEqualTo(ALL_TYPES_ROWS); } @Test @@ -301,7 +299,7 @@ public void testFullPath() { TEST_TABLE_ALL_TYPES)) .execute() .collect()); - assertEquals(ALL_TYPES_ROWS, results); + assertThat(results).isEqualTo(ALL_TYPES_ROWS); } @Test @@ -318,7 +316,7 @@ public void testSelectToInsert() throws Exception { tEnv.sqlQuery(String.format("select * from %s", TEST_SINK_TABLE_ALL_TYPES)) .execute() .collect()); - assertEquals(ALL_TYPES_ROWS, results); + assertThat(results).isEqualTo(ALL_TYPES_ROWS); } @Test @@ -339,6 +337,6 @@ public void testGroupByInsert() throws Exception { TEST_TABLE_SINK_FROM_GROUPED_BY)) .execute() .collect()); - assertEquals(Lists.newArrayList(Row.ofKind(RowKind.INSERT, 2L, -1L)), results); + assertThat(results).isEqualTo(Lists.newArrayList(Row.ofKind(RowKind.INSERT, 2L, -1L))); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogITCase.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogITCase.java index 48e44b5d17867..6a3339a86ca10 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogITCase.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogITCase.java @@ -30,7 +30,7 @@ import static org.apache.flink.connector.jdbc.catalog.PostgresCatalog.DEFAULT_DATABASE; import static org.apache.flink.table.api.config.ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** E2E test for {@link PostgresCatalog}. 
*/ public class PostgresCatalogITCase extends PostgresCatalogTestBase { @@ -54,7 +54,7 @@ public void testSelectField() { tEnv.sqlQuery(String.format("select id from %s", TABLE1)) .execute() .collect()); - assertEquals("[+I[1]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1]]"); } @Test @@ -64,7 +64,7 @@ public void testWithoutSchema() { tEnv.sqlQuery(String.format("select * from %s", TABLE1)) .execute() .collect()); - assertEquals("[+I[1]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1]]"); } @Test @@ -77,7 +77,7 @@ public void testWithSchema() { PostgresTablePath.fromFlinkTableName(TABLE1))) .execute() .collect()); - assertEquals("[+I[1]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1]]"); } @Test @@ -92,7 +92,7 @@ public void testFullPath() { PostgresTablePath.fromFlinkTableName(TABLE1))) .execute() .collect()); - assertEquals("[+I[1]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1]]"); } @Test @@ -104,7 +104,7 @@ public void testInsert() throws Exception { tEnv.sqlQuery(String.format("select * from %s", TABLE4)) .execute() .collect()); - assertEquals("[+I[1]]", results.toString()); + assertThat(results.toString()).isEqualTo("[+I[1]]"); } @Test @@ -125,9 +125,9 @@ public void testGroupByInsert() throws Exception { tEnv.sqlQuery(String.format("select * from `%s`", TABLE_PRIMITIVE_TYPE2)) .execute() .collect()); - assertEquals( - "[+I[1, [52, 49], 3, 4, 5.5, 6.6, 7.70000, 8.8, true, a, B, C , d, 2016-06-22T19:10:25, 2015-01-01, 00:51:03, 500.000000000000000000]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[1, [52, 49], 3, 4, 5.5, 6.6, 7.70000, 8.8, true, a, B, C , d, 2016-06-22T19:10:25, 2015-01-01, 00:51:03, 500.000000000000000000]]"); } @Test @@ -138,9 +138,9 @@ public void testPrimitiveTypes() { .execute() .collect()); - assertEquals( - "[+I[1, [50], 3, 4, 5.5, 6.6, 7.70000, 8.8, true, a, b, c , d, 2016-06-22T19:10:25, 2015-01-01, 00:51:03, 500.000000000000000000]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[1, [50], 3, 4, 5.5, 6.6, 7.70000, 8.8, true, a, b, c , d, 2016-06-22T19:10:25, 2015-01-01, 00:51:03, 500.000000000000000000]]"); } @Test @@ -151,28 +151,28 @@ public void testArrayTypes() { .execute() .collect()); - assertEquals( - "[+I[" - + "[1, 2, 3], " - + "[[50], [51], [52]], " - + "[3, 4, 5], " - + "[4, 5, 6], " - + "[5.5, 6.6, 7.7], " - + "[6.6, 7.7, 8.8], " - + "[7.70000, 8.80000, 9.90000], " - + "[8.800000000000000000, 9.900000000000000000, 10.100000000000000000], " - + "[9.90, 10.10, 11.11], " - + "[true, false, true], " - + "[a, b, c], " - + "[b, c, d], " - + "[b , c , d ], " - + "[b, c, d], " - + "[2016-06-22T19:10:25, 2019-06-22T19:10:25], " - + "[2015-01-01, 2020-01-01], " - + "[00:51:03, 00:59:03], " - + "null, " - + "null]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[" + + "[1, 2, 3], " + + "[[50], [51], [52]], " + + "[3, 4, 5], " + + "[4, 5, 6], " + + "[5.5, 6.6, 7.7], " + + "[6.6, 7.7, 8.8], " + + "[7.70000, 8.80000, 9.90000], " + + "[8.800000000000000000, 9.900000000000000000, 10.100000000000000000], " + + "[9.90, 10.10, 11.11], " + + "[true, false, true], " + + "[a, b, c], " + + "[b, c, d], " + + "[b , c , d ], " + + "[b, c, d], " + + "[2016-06-22T19:10:25, 2019-06-22T19:10:25], " + + "[2015-01-01, 2020-01-01], " + + "[00:51:03, 00:59:03], " + + "null, " + + "null]]"); } @Test @@ -183,14 +183,14 @@ public void testSerialTypes() { .execute() .collect()); - 
assertEquals( - "[+I[" - + "32767, " - + "2147483647, " - + "32767, " - + "2147483647, " - + "9223372036854775807, " - + "9223372036854775807]]", - results.toString()); + assertThat(results.toString()) + .isEqualTo( + "[+I[" + + "32767, " + + "2147483647, " + + "32767, " + + "2147483647, " + + "9223372036854775807, " + + "9223372036854775807]]"); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogTest.java index c1f5d93de7bff..a5a9419123daf 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresCatalogTest.java @@ -29,9 +29,7 @@ import java.util.Arrays; import java.util.List; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link PostgresCatalog}. */ public class PostgresCatalogTest extends PostgresCatalogTestBase { @@ -49,14 +47,14 @@ public void testGetDb_DatabaseNotExistException() throws Exception { public void testListDatabases() { List actual = catalog.listDatabases(); - assertEquals(Arrays.asList("postgres", "test"), actual); + assertThat(actual).isEqualTo(Arrays.asList("postgres", "test")); } @Test public void testDbExists() throws Exception { - assertFalse(catalog.databaseExists("nonexistent")); + assertThat(catalog.databaseExists("nonexistent")).isFalse(); - assertTrue(catalog.databaseExists(PostgresCatalog.DEFAULT_DATABASE)); + assertThat(catalog.databaseExists(PostgresCatalog.DEFAULT_DATABASE)).isTrue(); } // ------ tables ------ @@ -65,20 +63,20 @@ public void testDbExists() throws Exception { public void testListTables() throws DatabaseNotExistException { List actual = catalog.listTables(PostgresCatalog.DEFAULT_DATABASE); - assertEquals( - Arrays.asList( - "public.array_table", - "public.primitive_table", - "public.primitive_table2", - "public.serial_table", - "public.t1", - "public.t4", - "public.t5"), - actual); + assertThat(actual) + .isEqualTo( + Arrays.asList( + "public.array_table", + "public.primitive_table", + "public.primitive_table2", + "public.serial_table", + "public.t1", + "public.t4", + "public.t5")); actual = catalog.listTables(TEST_DB); - assertEquals(Arrays.asList("public.t2", "test_schema.t3"), actual); + assertThat(actual).isEqualTo(Arrays.asList("public.t2", "test_schema.t3")); } @Test @@ -89,11 +87,12 @@ public void testListTables_DatabaseNotExistException() throws DatabaseNotExistEx @Test public void testTableExists() { - assertFalse(catalog.tableExists(new ObjectPath(TEST_DB, "nonexist"))); + assertThat(catalog.tableExists(new ObjectPath(TEST_DB, "nonexist"))).isFalse(); - assertTrue(catalog.tableExists(new ObjectPath(PostgresCatalog.DEFAULT_DATABASE, TABLE1))); - assertTrue(catalog.tableExists(new ObjectPath(TEST_DB, TABLE2))); - assertTrue(catalog.tableExists(new ObjectPath(TEST_DB, "test_schema.t3"))); + assertThat(catalog.tableExists(new ObjectPath(PostgresCatalog.DEFAULT_DATABASE, TABLE1))) + .isTrue(); + assertThat(catalog.tableExists(new ObjectPath(TEST_DB, TABLE2))).isTrue(); + assertThat(catalog.tableExists(new ObjectPath(TEST_DB, "test_schema.t3"))).isTrue(); } @Test @@ -128,25 +127,25 @@ public void testGetTable() 
CatalogBaseTable table = catalog.getTable(new ObjectPath("postgres", TABLE1)); - assertEquals(schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(schema); table = catalog.getTable(new ObjectPath("postgres", "public.t1")); - assertEquals(schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(schema); // test testdb.public.user2 table = catalog.getTable(new ObjectPath(TEST_DB, TABLE2)); - assertEquals(schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(schema); table = catalog.getTable(new ObjectPath(TEST_DB, "public.t2")); - assertEquals(schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(schema); // test testdb.testschema.user2 table = catalog.getTable(new ObjectPath(TEST_DB, TEST_SCHEMA + ".t3")); - assertEquals(schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(schema); } @Test @@ -155,7 +154,7 @@ public void testPrimitiveDataTypes() throws TableNotExistException { catalog.getTable( new ObjectPath(PostgresCatalog.DEFAULT_DATABASE, TABLE_PRIMITIVE_TYPE)); - assertEquals(getPrimitiveTable().schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(getPrimitiveTable().schema); } @Test @@ -164,7 +163,7 @@ public void testArrayDataTypes() throws TableNotExistException { catalog.getTable( new ObjectPath(PostgresCatalog.DEFAULT_DATABASE, TABLE_ARRAY_TYPE)); - assertEquals(getArrayTable().schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(getArrayTable().schema); } @Test @@ -173,6 +172,6 @@ public void testSerialDataTypes() throws TableNotExistException { catalog.getTable( new ObjectPath(PostgresCatalog.DEFAULT_DATABASE, TABLE_SERIAL_TYPE)); - assertEquals(getSerialTable().schema, table.getUnresolvedSchema()); + assertThat(table.getUnresolvedSchema()).isEqualTo(getSerialTable().schema); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresTablePathTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresTablePathTest.java index 1370df775fde9..db63c349f0044 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresTablePathTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/PostgresTablePathTest.java @@ -20,14 +20,13 @@ import org.junit.Test; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link PostgresTablePath}. 
*/ public class PostgresTablePathTest { @Test public void testFromFlinkTableName() { - assertEquals( - new PostgresTablePath("public", "topic"), - PostgresTablePath.fromFlinkTableName("public.topic")); + assertThat(PostgresTablePath.fromFlinkTableName("public.topic")) + .isEqualTo(new PostgresTablePath("public", "topic")); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/factory/JdbcCatalogFactoryTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/factory/JdbcCatalogFactoryTest.java index f8b509de5e66b..2da31122747b1 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/factory/JdbcCatalogFactoryTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/catalog/factory/JdbcCatalogFactoryTest.java @@ -38,8 +38,7 @@ import java.util.HashMap; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link JdbcCatalogFactory}. */ public class JdbcCatalogFactoryTest { @@ -98,14 +97,14 @@ public void test() { checkEquals(catalog, (JdbcCatalog) actualCatalog); - assertTrue(((JdbcCatalog) actualCatalog).getInternal() instanceof PostgresCatalog); + assertThat(((JdbcCatalog) actualCatalog).getInternal()).isInstanceOf(PostgresCatalog.class); } private static void checkEquals(JdbcCatalog c1, JdbcCatalog c2) { - assertEquals(c1.getName(), c2.getName()); - assertEquals(c1.getDefaultDatabase(), c2.getDefaultDatabase()); - assertEquals(c1.getUsername(), c2.getUsername()); - assertEquals(c1.getPassword(), c2.getPassword()); - assertEquals(c1.getBaseUrl(), c2.getBaseUrl()); + assertThat(c2.getName()).isEqualTo(c1.getName()); + assertThat(c2.getDefaultDatabase()).isEqualTo(c1.getDefaultDatabase()); + assertThat(c2.getUsername()).isEqualTo(c1.getUsername()); + assertThat(c2.getPassword()).isEqualTo(c1.getPassword()); + assertThat(c2.getBaseUrl()).isEqualTo(c1.getBaseUrl()); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/converter/AbstractJdbcRowConverterTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/converter/AbstractJdbcRowConverterTest.java index 19e80b3a9da9f..670b39c6064c5 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/converter/AbstractJdbcRowConverterTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/converter/AbstractJdbcRowConverterTest.java @@ -29,7 +29,7 @@ import java.sql.ResultSet; import java.time.LocalDateTime; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link AbstractJdbcRowConverter}. 
*/ public class AbstractJdbcRowConverterTest { @@ -54,9 +54,8 @@ public String converterName() { .thenReturn(LocalDateTime.parse("2021-04-07T00:00:05.999")); RowData res = rowConverter.toInternal(resultSet); - assertEquals(123, res.getInt(0)); - assertEquals( - LocalDateTime.parse("2021-04-07T00:00:05.999"), - res.getTimestamp(1, 3).toLocalDateTime()); + assertThat(res.getInt(0)).isEqualTo(123); + assertThat(res.getTimestamp(1, 3).toLocalDateTime()) + .isEqualTo(LocalDateTime.parse("2021-04-07T00:00:05.999")); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OraclePreparedStatementTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OraclePreparedStatementTest.java index 32f98ba1f5169..cc7611470d959 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OraclePreparedStatementTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OraclePreparedStatementTest.java @@ -30,7 +30,7 @@ import static java.util.Arrays.asList; import static java.util.Collections.singletonList; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link OraclePreparedStatementTest}. */ public class OraclePreparedStatementTest { @@ -44,10 +44,10 @@ public class OraclePreparedStatementTest { @Test public void testInsertStatement() { String insertStmt = dialect.getInsertIntoStatement(tableName, fieldNames); - assertEquals( - "INSERT INTO tbl(id, name, email, ts, field1, field_2, __field_3__) " - + "VALUES (:id, :name, :email, :ts, :field1, :field_2, :__field_3__)", - insertStmt); + assertThat(insertStmt) + .isEqualTo( + "INSERT INTO tbl(id, name, email, ts, field1, field_2, __field_3__) " + + "VALUES (:id, :name, :email, :ts, :field1, :field_2, :__field_3__)"); NamedStatementMatcher.parsedSql( "INSERT INTO tbl(id, name, email, ts, field1, field_2, __field_3__) " + "VALUES (?, ?, ?, ?, ?, ?, ?)") @@ -64,7 +64,8 @@ public void testInsertStatement() { @Test public void testDeleteStatement() { String deleteStmt = dialect.getDeleteStatement(tableName, keyFields); - assertEquals("DELETE FROM tbl WHERE id = :id AND __field_3__ = :__field_3__", deleteStmt); + assertThat(deleteStmt) + .isEqualTo("DELETE FROM tbl WHERE id = :id AND __field_3__ = :__field_3__"); NamedStatementMatcher.parsedSql("DELETE FROM tbl WHERE id = ? AND __field_3__ = ?") .parameter("id", singletonList(1)) .parameter("__field_3__", singletonList(2)) @@ -74,8 +75,8 @@ public void testDeleteStatement() { @Test public void testRowExistsStatement() { String rowExistStmt = dialect.getRowExistsStatement(tableName, keyFields); - assertEquals( - "SELECT 1 FROM tbl WHERE id = :id AND __field_3__ = :__field_3__", rowExistStmt); + assertThat(rowExistStmt) + .isEqualTo("SELECT 1 FROM tbl WHERE id = :id AND __field_3__ = :__field_3__"); NamedStatementMatcher.parsedSql("SELECT 1 FROM tbl WHERE id = ? 
AND __field_3__ = ?") .parameter("id", singletonList(1)) .parameter("__field_3__", singletonList(2)) @@ -85,11 +86,11 @@ public void testRowExistsStatement() { @Test public void testUpdateStatement() { String updateStmt = dialect.getUpdateStatement(tableName, fieldNames, keyFields); - assertEquals( - "UPDATE tbl SET id = :id, name = :name, email = :email, ts = :ts, " - + "field1 = :field1, field_2 = :field_2, __field_3__ = :__field_3__ " - + "WHERE id = :id AND __field_3__ = :__field_3__", - updateStmt); + assertThat(updateStmt) + .isEqualTo( + "UPDATE tbl SET id = :id, name = :name, email = :email, ts = :ts, " + + "field1 = :field1, field_2 = :field_2, __field_3__ = :__field_3__ " + + "WHERE id = :id AND __field_3__ = :__field_3__"); NamedStatementMatcher.parsedSql( "UPDATE tbl SET id = ?, name = ?, email = ?, ts = ?, field1 = ?, " + "field_2 = ?, __field_3__ = ? WHERE id = ? AND __field_3__ = ?") @@ -106,14 +107,14 @@ public void testUpdateStatement() { @Test public void testUpsertStatement() { String upsertStmt = dialect.getUpsertStatement(tableName, fieldNames, keyFields).get(); - assertEquals( - " MERGE INTO tbl t " - + " USING (SELECT :id id, :name name, :email email, :ts ts, :field1 field1, :field_2 field_2, :__field_3__ __field_3__ FROM DUAL) s " - + " ON (t.id=s.id and t.__field_3__=s.__field_3__) " - + " WHEN MATCHED THEN UPDATE SET t.name=s.name, t.email=s.email, t.ts=s.ts, t.field1=s.field1, t.field_2=s.field_2" - + " WHEN NOT MATCHED THEN INSERT (id, name, email, ts, field1, field_2, __field_3__)" - + " VALUES (s.id, s.name, s.email, s.ts, s.field1, s.field_2, s.__field_3__)", - upsertStmt); + assertThat(upsertStmt) + .isEqualTo( + " MERGE INTO tbl t " + + " USING (SELECT :id id, :name name, :email email, :ts ts, :field1 field1, :field_2 field_2, :__field_3__ __field_3__ FROM DUAL) s " + + " ON (t.id=s.id and t.__field_3__=s.__field_3__) " + + " WHEN MATCHED THEN UPDATE SET t.name=s.name, t.email=s.email, t.ts=s.ts, t.field1=s.field1, t.field_2=s.field_2" + + " WHEN NOT MATCHED THEN INSERT (id, name, email, ts, field1, field_2, __field_3__)" + + " VALUES (s.id, s.name, s.email, s.ts, s.field1, s.field_2, s.__field_3__)"); NamedStatementMatcher.parsedSql( " MERGE INTO tbl t " + " USING (SELECT ? id, ? name, ? email, ? ts, ? field1, ? field_2, ? __field_3__ FROM DUAL) s " @@ -134,10 +135,10 @@ public void testUpsertStatement() { @Test public void testSelectStatement() { String selectStmt = dialect.getSelectFromStatement(tableName, fieldNames, keyFields); - assertEquals( - "SELECT id, name, email, ts, field1, field_2, __field_3__ FROM tbl " - + "WHERE id = :id AND __field_3__ = :__field_3__", - selectStmt); + assertThat(selectStmt) + .isEqualTo( + "SELECT id, name, email, ts, field1, field_2, __field_3__ FROM tbl " + + "WHERE id = :id AND __field_3__ = :__field_3__"); NamedStatementMatcher.parsedSql( "SELECT id, name, email, ts, field1, field_2, __field_3__ FROM tbl " + "WHERE id = ? 
AND __field_3__ = ?") @@ -165,8 +166,8 @@ public void matches(String statement) { Map> actualParams = new HashMap<>(); String actualParsedStmt = FieldNamedPreparedStatementImpl.parseNamedStatement(statement, actualParams); - assertEquals(parsedSql, actualParsedStmt); - assertEquals(parameterMap, actualParams); + assertThat(actualParsedStmt).isEqualTo(parsedSql); + assertThat(actualParams).isEqualTo(parameterMap); } } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OracleTableSourceITCase.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OracleTableSourceITCase.java index bd5f0777358a1..749b1a3e2945e 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OracleTableSourceITCase.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/dialect/oracle/OracleTableSourceITCase.java @@ -41,8 +41,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** The Table Source ITCase for {@link OracleDialect}. */ public class OracleTableSourceITCase extends AbstractTestBase { @@ -155,7 +154,7 @@ public void testJdbcSource() throws Exception { "+I[2, 1.12345, 2.1234567879, 101.1234, -1.175E-10, -1.79769E40, a, abc, abcdef, 1997-01-02, 2020-01-01T15:36:01.123456, 2020-01-01T15:36:01.123456789, Hey Leonard, [69, 61, 122, 52]]") .sorted() .collect(Collectors.toList()); - assertEquals(expected, result); + assertThat(result).isEqualTo(expected); } @Test @@ -198,7 +197,7 @@ public void testProject() throws Exception { "+I[2, 2020-01-01T15:36:01.123456, 101.1234]") .sorted() .collect(Collectors.toList()); - assertEquals(expected, result); + assertThat(result).isEqualTo(expected); } @Test @@ -240,9 +239,9 @@ public void testLimit() throws Exception { "+I[1, 2020-01-01T15:35:00.123456, 2020-01-01T15:35:00.123456789, 1.175E-10, 1.79769E40, 100.1234]"); expected.add( "+I[2, 2020-01-01T15:36:01.123456, 2020-01-01T15:36:01.123456789, -1.175E-10, -1.79769E40, 101.1234]"); - assertEquals(1, result.size()); - assertTrue( - "The actual output is not a subset of the expected set.", - expected.containsAll(result)); + assertThat(result).hasSize(1); + assertThat(expected.containsAll(result)) + .as("The actual output is not a subset of the expected set.") + .isTrue(); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcFullTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcFullTest.java index 206d202060d0c..b1904f3a0cd55 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcFullTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcFullTest.java @@ -35,7 +35,6 @@ import org.apache.flink.types.Row; import org.junit.After; -import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -57,7 +56,7 @@ import static org.apache.flink.connector.jdbc.utils.JdbcUtils.setRecordToStatement; import static org.apache.flink.util.ExceptionUtils.findThrowable; import static org.apache.flink.util.ExceptionUtils.findThrowableWithMessage; -import static org.junit.Assert.assertTrue; +import static 
org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doReturn; /** Tests using both {@link JdbcInputFormat} and {@link JdbcOutputFormat}. */ @@ -106,8 +105,8 @@ public void testEnrichedClassCastException() { jdbcOutputFormat.writeRecord(Tuple2.of(true, inputRow)); jdbcOutputFormat.close(); } catch (Exception e) { - assertTrue(findThrowable(e, ClassCastException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); + assertThat(findThrowable(e, ClassCastException.class)).isPresent(); + assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); } } @@ -170,7 +169,7 @@ private void runTest(boolean exploitParallelism) throws Exception { while (resultSet.next()) { count++; } - Assert.assertEquals(TEST_DATA.length, count); + assertThat(count).isEqualTo(TEST_DATA.length); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcTableOutputFormatTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcTableOutputFormatTest.java index 97e442cac4385..9e684e383c5af 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcTableOutputFormatTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/JdbcTableOutputFormatTest.java @@ -47,8 +47,7 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.OUTPUT_TABLE; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TestEntry; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doReturn; /** Tests for the {@link JdbcOutputFormat}. 
*/ @@ -165,10 +164,10 @@ public void closeStatements() {} format.writeRecord(Tuple2.of(false /* false = delete*/, toRow(TEST_DATA[0]))); format.flush(); - assertTrue("Delete should be executed", deleteExecuted[0]); - assertTrue( - "Delete executor should be prepared" + exceptionThrown[0], - deleteExecutorPrepared[0]); + assertThat(deleteExecuted[0]).as("Delete should be executed").isTrue(); + assertThat(deleteExecutorPrepared[0]) + .as("Delete executor should be prepared" + exceptionThrown[0]) + .isTrue(); } @Test @@ -243,7 +242,7 @@ public static void check(Row[] rows, String url, String table, String[] fields) String[] sortedResult = results.toArray(new String[0]); Arrays.sort(sortedExpect); Arrays.sort(sortedResult); - assertArrayEquals(sortedExpect, sortedResult); + assertThat(sortedResult).isEqualTo(sortedExpect); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderDriverClassConcurrentLoadingITCase.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderDriverClassConcurrentLoadingITCase.java index 22b50361f2f97..ffd2e5e74c219 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderDriverClassConcurrentLoadingITCase.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderDriverClassConcurrentLoadingITCase.java @@ -29,8 +29,7 @@ import java.util.concurrent.CountDownLatch; import java.util.function.Function; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** * This test deals with sql driver class loading issues; run as an ITCase so it won't be interfered @@ -55,8 +54,8 @@ private static boolean isClassLoaded(ClassLoader classLoader, String className) public void testDriverClassConcurrentLoading() throws Exception { ClassLoader classLoader = getClass().getClassLoader(); - assertFalse(isClassLoaded(classLoader, FakeDBUtils.DRIVER1_CLASS_NAME)); - assertFalse(isClassLoaded(classLoader, FakeDBUtils.DRIVER2_CLASS_NAME)); + assertThat(isClassLoaded(classLoader, FakeDBUtils.DRIVER1_CLASS_NAME)).isFalse(); + assertThat(isClassLoaded(classLoader, FakeDBUtils.DRIVER2_CLASS_NAME)).isFalse(); JdbcConnectionOptions connectionOptions1 = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder() @@ -103,7 +102,7 @@ public void go() throws Exception { connectionThread1.sync(); connectionThread2.sync(); - assertTrue(isClassLoaded(classLoader, FakeDBUtils.DRIVER1_CLASS_NAME)); - assertTrue(isClassLoaded(classLoader, FakeDBUtils.DRIVER2_CLASS_NAME)); + assertThat(isClassLoaded(classLoader, FakeDBUtils.DRIVER1_CLASS_NAME)).isTrue(); + assertThat(isClassLoaded(classLoader, FakeDBUtils.DRIVER2_CLASS_NAME)).isTrue(); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderTest.java index d80f23fbabf55..41a052140a7c1 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderTest.java +++ 
b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/internal/connection/SimpleJdbcConnectionProviderTest.java @@ -34,18 +34,8 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test for {@link SimpleJdbcConnectionProvider}. */ public class SimpleJdbcConnectionProviderTest { @@ -71,36 +61,36 @@ private static JdbcConnectionProvider newFakeConnectionProvider() { @Test public void testEstablishConnection() throws Exception { JdbcConnectionProvider provider = newFakeConnectionProvider(); - assertNull(provider.getConnection()); - assertFalse(provider.isConnectionValid()); + assertThat(provider.getConnection()).isNull(); + assertThat(provider.isConnectionValid()).isFalse(); Connection connection = provider.getOrEstablishConnection(); - assertNotNull(connection); - assertFalse(connection.isClosed()); - assertTrue(provider.isConnectionValid()); - assertThat(connection, instanceOf(FakeConnection.class)); - - assertNotNull(provider.getConnection()); - assertSame(connection, provider.getConnection()); - assertSame(connection, provider.getOrEstablishConnection()); + assertThat(connection).isNotNull(); + assertThat(connection.isClosed()).isFalse(); + assertThat(provider.isConnectionValid()).isTrue(); + assertThat(connection).isInstanceOf(FakeConnection.class); + + assertThat(provider.getConnection()).isNotNull(); + assertThat(provider.getConnection()).isSameAs(connection); + assertThat(provider.getOrEstablishConnection()).isSameAs(connection); } @Test public void testEstablishConnectionWithoutDriverName() throws Exception { JdbcConnectionProvider provider = newProvider(FakeDBUtils.TEST_DB_URL, null); - assertNull(provider.getConnection()); - assertFalse(provider.isConnectionValid()); + assertThat(provider.getConnection()).isNull(); + assertThat(provider.isConnectionValid()).isFalse(); Connection connection = provider.getOrEstablishConnection(); - assertNotNull(connection); - assertFalse(connection.isClosed()); - assertTrue(provider.isConnectionValid()); - assertThat(connection, instanceOf(FakeConnection.class)); - assertThat(connection, not(instanceOf(FakeConnection3.class))); - - assertNotNull(provider.getConnection()); - assertSame(connection, provider.getConnection()); - assertSame(connection, provider.getOrEstablishConnection()); + assertThat(connection).isNotNull(); + assertThat(connection.isClosed()).isFalse(); + assertThat(provider.isConnectionValid()).isTrue(); + assertThat(connection).isInstanceOf(FakeConnection.class); + assertThat(connection).isNotInstanceOf(FakeConnection3.class); + + assertThat(provider.getConnection()).isNotNull(); + assertThat(provider.getConnection()).isSameAs(connection); + assertThat(provider.getOrEstablishConnection()).isSameAs(connection); } @Test @@ -108,12 +98,12 @@ public void testEstablishDriverConnection() throws Exception { JdbcConnectionProvider provider1 
= newFakeConnectionProviderWithDriverName(FakeDBUtils.DRIVER1_CLASS_NAME); Connection connection1 = provider1.getOrEstablishConnection(); - assertThat(connection1, instanceOf(FakeConnection1.class)); + assertThat(connection1).isInstanceOf(FakeConnection1.class); JdbcConnectionProvider provider2 = newFakeConnectionProviderWithDriverName(FakeDBUtils.DRIVER2_CLASS_NAME); Connection connection2 = provider2.getOrEstablishConnection(); - assertThat(connection2, instanceOf(FakeConnection2.class)); + assertThat(connection2).isInstanceOf(FakeConnection2.class); } @Test @@ -124,35 +114,31 @@ public void testEstablishUnregisteredDriverConnection() throws Exception { .map(Driver::getClass) .map(Class::getName) .collect(Collectors.toSet()); - assertThat(registeredDriverNames, not(hasItem(unregisteredDriverName))); + assertThat(registeredDriverNames).doesNotContain(unregisteredDriverName); JdbcConnectionProvider provider = newFakeConnectionProviderWithDriverName(unregisteredDriverName); Connection connection = provider.getOrEstablishConnection(); - assertThat(connection, instanceOf(FakeConnection3.class)); + assertThat(connection).isInstanceOf(FakeConnection3.class); } @Test - public void testInvalidDriverUrl() throws Exception { + public void testInvalidDriverUrl() { JdbcConnectionProvider provider = newProvider(FakeDBUtils.TEST_DB_INVALID_URL, FakeDBUtils.DRIVER1_CLASS_NAME); - try { - provider.getOrEstablishConnection(); - fail("expect exception"); - } catch (SQLException ex) { - assertThat( - ex.getMessage(), - containsString( - "No suitable driver found for " + FakeDBUtils.TEST_DB_INVALID_URL)); - } + + assertThatThrownBy(provider::getOrEstablishConnection) + .isInstanceOf(SQLException.class) + .hasMessageContaining( + "No suitable driver found for " + FakeDBUtils.TEST_DB_INVALID_URL); } @Test public void testCloseNullConnection() throws Exception { JdbcConnectionProvider provider = newFakeConnectionProvider(); provider.closeConnection(); - assertNull(provider.getConnection()); - assertFalse(provider.isConnectionValid()); + assertThat(provider.getConnection()).isNull(); + assertThat(provider.isConnectionValid()).isFalse(); } @Test @@ -161,17 +147,17 @@ public void testCloseConnection() throws Exception { Connection connection1 = provider.getOrEstablishConnection(); provider.closeConnection(); - assertNull(provider.getConnection()); - assertFalse(provider.isConnectionValid()); - assertTrue(connection1.isClosed()); + assertThat(provider.getConnection()).isNull(); + assertThat(provider.isConnectionValid()).isFalse(); + assertThat(connection1.isClosed()).isTrue(); Connection connection2 = provider.getOrEstablishConnection(); - assertNotSame(connection1, connection2); - assertFalse(connection2.isClosed()); + assertThat(connection2).isNotSameAs(connection1); + assertThat(connection2.isClosed()).isFalse(); connection2.close(); - assertNotNull(provider.getConnection()); - assertFalse(provider.isConnectionValid()); + assertThat(provider.getConnection()).isNotNull(); + assertThat(provider.isConnectionValid()).isFalse(); } @Test @@ -179,18 +165,18 @@ public void testReestablishCachedConnection() throws Exception { JdbcConnectionProvider provider = newFakeConnectionProvider(); Connection connection1 = provider.reestablishConnection(); - assertNotNull(connection1); - assertFalse(connection1.isClosed()); - assertSame(connection1, provider.getConnection()); - assertSame(connection1, provider.getOrEstablishConnection()); + assertThat(connection1).isNotNull(); + assertThat(connection1.isClosed()).isFalse(); + 
assertThat(provider.getConnection()).isSameAs(connection1); + assertThat(provider.getOrEstablishConnection()).isSameAs(connection1); Connection connection2 = provider.reestablishConnection(); - assertNotNull(connection2); - assertFalse(connection2.isClosed()); - assertSame(connection2, provider.getConnection()); - assertSame(connection2, provider.getOrEstablishConnection()); + assertThat(connection2).isNotNull(); + assertThat(connection2.isClosed()).isFalse(); + assertThat(provider.getConnection()).isSameAs(connection2); + assertThat(provider.getOrEstablishConnection()).isSameAs(connection2); - assertTrue(connection1.isClosed()); - assertNotSame(connection1, connection2); + assertThat(connection1.isClosed()).isTrue(); + assertThat(connection2).isNotSameAs(connection1); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java index 6fe151c468487..4698c34f42ca3 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java @@ -22,7 +22,7 @@ import java.io.Serializable; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link JdbcNumericBetweenParametersProvider}. */ public class NumericBetweenParametersProviderTest { @@ -116,10 +116,10 @@ public void testBatchNumTooLarge() { } private void check(long[][] expected, Serializable[][] actual) { - assertEquals(expected.length, actual.length); + assertThat(actual).hasDimensions(expected.length, 2); for (int i = 0; i < expected.length; i++) { for (int j = 0; j < 2; j++) { - assertEquals(expected[i][j], ((Long) actual[i][j]).longValue()); + assertThat(((Long) actual[i][j]).longValue()).isEqualTo(expected[i][j]); } } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/statement/FieldNamedPreparedStatementImplTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/statement/FieldNamedPreparedStatementImplTest.java index 0be4141868f99..6013df6ecb9e5 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/statement/FieldNamedPreparedStatementImplTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/statement/FieldNamedPreparedStatementImplTest.java @@ -29,7 +29,7 @@ import static java.util.Arrays.asList; import static java.util.Collections.singletonList; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link FieldNamedPreparedStatementImpl}. 
*/ public class FieldNamedPreparedStatementImplTest { @@ -43,10 +43,10 @@ public class FieldNamedPreparedStatementImplTest { @Test public void testInsertStatement() { String insertStmt = dialect.getInsertIntoStatement(tableName, fieldNames); - assertEquals( - "INSERT INTO `tbl`(`id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__`) " - + "VALUES (:id, :name, :email, :ts, :field1, :field_2, :__field_3__)", - insertStmt); + assertThat(insertStmt) + .isEqualTo( + "INSERT INTO `tbl`(`id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__`) " + + "VALUES (:id, :name, :email, :ts, :field1, :field_2, :__field_3__)"); NamedStatementMatcher.parsedSql( "INSERT INTO `tbl`(`id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__`) " + "VALUES (?, ?, ?, ?, ?, ?, ?)") @@ -63,8 +63,8 @@ public void testInsertStatement() { @Test public void testDeleteStatement() { String deleteStmt = dialect.getDeleteStatement(tableName, keyFields); - assertEquals( - "DELETE FROM `tbl` WHERE `id` = :id AND `__field_3__` = :__field_3__", deleteStmt); + assertThat(deleteStmt) + .isEqualTo("DELETE FROM `tbl` WHERE `id` = :id AND `__field_3__` = :__field_3__"); NamedStatementMatcher.parsedSql("DELETE FROM `tbl` WHERE `id` = ? AND `__field_3__` = ?") .parameter("id", singletonList(1)) .parameter("__field_3__", singletonList(2)) @@ -74,9 +74,8 @@ public void testDeleteStatement() { @Test public void testRowExistsStatement() { String rowExistStmt = dialect.getRowExistsStatement(tableName, keyFields); - assertEquals( - "SELECT 1 FROM `tbl` WHERE `id` = :id AND `__field_3__` = :__field_3__", - rowExistStmt); + assertThat(rowExistStmt) + .isEqualTo("SELECT 1 FROM `tbl` WHERE `id` = :id AND `__field_3__` = :__field_3__"); NamedStatementMatcher.parsedSql("SELECT 1 FROM `tbl` WHERE `id` = ? AND `__field_3__` = ?") .parameter("id", singletonList(1)) .parameter("__field_3__", singletonList(2)) @@ -86,11 +85,11 @@ public void testRowExistsStatement() { @Test public void testUpdateStatement() { String updateStmt = dialect.getUpdateStatement(tableName, fieldNames, keyFields); - assertEquals( - "UPDATE `tbl` SET `id` = :id, `name` = :name, `email` = :email, `ts` = :ts, " - + "`field1` = :field1, `field_2` = :field_2, `__field_3__` = :__field_3__ " - + "WHERE `id` = :id AND `__field_3__` = :__field_3__", - updateStmt); + assertThat(updateStmt) + .isEqualTo( + "UPDATE `tbl` SET `id` = :id, `name` = :name, `email` = :email, `ts` = :ts, " + + "`field1` = :field1, `field_2` = :field_2, `__field_3__` = :__field_3__ " + + "WHERE `id` = :id AND `__field_3__` = :__field_3__"); NamedStatementMatcher.parsedSql( "UPDATE `tbl` SET `id` = ?, `name` = ?, `email` = ?, `ts` = ?, `field1` = ?, " + "`field_2` = ?, `__field_3__` = ? WHERE `id` = ? 
AND `__field_3__` = ?") @@ -107,13 +106,13 @@ public void testUpdateStatement() { @Test public void testUpsertStatement() { String upsertStmt = dialect.getUpsertStatement(tableName, fieldNames, keyFields).get(); - assertEquals( - "INSERT INTO `tbl`(`id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__`) " - + "VALUES (:id, :name, :email, :ts, :field1, :field_2, :__field_3__) " - + "ON DUPLICATE KEY UPDATE `id`=VALUES(`id`), `name`=VALUES(`name`), " - + "`email`=VALUES(`email`), `ts`=VALUES(`ts`), `field1`=VALUES(`field1`)," - + " `field_2`=VALUES(`field_2`), `__field_3__`=VALUES(`__field_3__`)", - upsertStmt); + assertThat(upsertStmt) + .isEqualTo( + "INSERT INTO `tbl`(`id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__`) " + + "VALUES (:id, :name, :email, :ts, :field1, :field_2, :__field_3__) " + + "ON DUPLICATE KEY UPDATE `id`=VALUES(`id`), `name`=VALUES(`name`), " + + "`email`=VALUES(`email`), `ts`=VALUES(`ts`), `field1`=VALUES(`field1`)," + + " `field_2`=VALUES(`field_2`), `__field_3__`=VALUES(`__field_3__`)"); NamedStatementMatcher.parsedSql( "INSERT INTO `tbl`(`id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__`) " + "VALUES (?, ?, ?, ?, ?, ?, ?) ON DUPLICATE KEY UPDATE " @@ -132,10 +131,10 @@ public void testUpsertStatement() { @Test public void testSelectStatement() { String selectStmt = dialect.getSelectFromStatement(tableName, fieldNames, keyFields); - assertEquals( - "SELECT `id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__` FROM `tbl` " - + "WHERE `id` = :id AND `__field_3__` = :__field_3__", - selectStmt); + assertThat(selectStmt) + .isEqualTo( + "SELECT `id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__` FROM `tbl` " + + "WHERE `id` = :id AND `__field_3__` = :__field_3__"); NamedStatementMatcher.parsedSql( "SELECT `id`, `name`, `email`, `ts`, `field1`, `field_2`, `__field_3__` FROM `tbl` " + "WHERE `id` = ? 
AND `__field_3__` = ?") @@ -163,8 +162,8 @@ public void matches(String statement) { Map> actualParams = new HashMap<>(); String actualParsedStmt = FieldNamedPreparedStatementImpl.parseNamedStatement(statement, actualParams); - assertEquals(parsedSql, actualParsedStmt); - assertEquals(parameterMap, actualParams); + assertThat(actualParsedStmt).isEqualTo(parsedSql); + assertThat(actualParams).isEqualTo(parameterMap); } } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java index a3e08abef731b..6140dacb8bac9 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java @@ -40,9 +40,8 @@ import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink; import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSource; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** * Test for {@link JdbcDynamicTableSource} and {@link JdbcDynamicTableSink} created by {@link @@ -92,7 +91,7 @@ public void testJdbcCommonProperties() { JdbcReadOptions.builder().build(), lookupOptions, SCHEMA.toPhysicalRowDataType()); - assertEquals(expectedSource, actualSource); + assertThat(actualSource).isEqualTo(expectedSource); // validation for sink DynamicTableSink actualSink = createTableSink(SCHEMA, properties); @@ -113,7 +112,7 @@ public void testJdbcCommonProperties() { JdbcDynamicTableSink expectedSink = new JdbcDynamicTableSink( options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType()); - assertEquals(expectedSink, actualSink); + assertThat(actualSink).isEqualTo(expectedSink); } @Test @@ -152,7 +151,7 @@ public void testJdbcReadProperties() { new JdbcDynamicTableSource( options, readOptions, lookupOptions, SCHEMA.toPhysicalRowDataType()); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); } @Test @@ -182,7 +181,7 @@ public void testJdbcLookupProperties() { lookupOptions, SCHEMA.toPhysicalRowDataType()); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); } @Test @@ -217,7 +216,7 @@ public void testJdbcSinkProperties() { new JdbcDynamicTableSink( options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType()); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); } @Test @@ -251,7 +250,7 @@ public void testJDBCSinkWithParallelism() { new JdbcDynamicTableSink( options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType()); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); } @Test @@ -264,12 +263,12 @@ public void testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, "Either all or none of the following options should be provided:\n" - + "username\npassword") - .isPresent()); + + "username\npassword")) + .isPresent(); } // read partition properties not complete @@ -282,15 +281,15 @@ public void 
testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, "Either all or none of the following options should be provided:\n" + "scan.partition.column\n" + "scan.partition.num\n" + "scan.partition.lower-bound\n" - + "scan.partition.upper-bound") - .isPresent()); + + "scan.partition.upper-bound")) + .isPresent(); } // read partition lower-bound > upper-bound @@ -304,12 +303,12 @@ public void testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, "'scan.partition.lower-bound'='100' must not be larger than " - + "'scan.partition.upper-bound'='-10'.") - .isPresent()); + + "'scan.partition.upper-bound'='-10'.")) + .isPresent(); } // lookup cache properties not complete @@ -320,13 +319,13 @@ public void testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, "Either all or none of the following options should be provided:\n" + "lookup.cache.max-rows\n" - + "lookup.cache.ttl") - .isPresent()); + + "lookup.cache.ttl")) + .isPresent(); } // lookup cache properties not complete @@ -337,13 +336,13 @@ public void testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, "Either all or none of the following options should be provided:\n" + "lookup.cache.max-rows\n" - + "lookup.cache.ttl") - .isPresent()); + + "lookup.cache.ttl")) + .isPresent(); } // lookup retries shouldn't be negative @@ -353,11 +352,11 @@ public void testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, - "The value of 'lookup.max-retries' option shouldn't be negative, but is -1.") - .isPresent()); + "The value of 'lookup.max-retries' option shouldn't be negative, but is -1.")) + .isPresent(); } // sink retries shouldn't be negative @@ -367,11 +366,11 @@ public void testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, - "The value of 'sink.max-retries' option shouldn't be negative, but is -1.") - .isPresent()); + "The value of 'sink.max-retries' option shouldn't be negative, but is -1.")) + .isPresent(); } // connection.max-retry-timeout shouldn't be smaller than 1 second @@ -381,11 +380,11 @@ public void testJdbcValidation() { createTableSource(SCHEMA, properties); fail("exception expected"); } catch (Throwable t) { - assertTrue( - ExceptionUtils.findThrowableWithMessage( + assertThat( + ExceptionUtils.findThrowableWithMessage( t, - "The value of 'connection.max-retry-timeout' option must be in second granularity and shouldn't be smaller than 1 second, but is 100ms.") - .isPresent()); + "The value of 'connection.max-retry-timeout' option must be in second granularity and shouldn't be smaller than 1 
second, but is 100ms.")) + .isPresent(); } } @@ -418,7 +417,7 @@ public void testJdbcLookupPropertiesWithExcludeEmptyResult() { lookupOptions, SCHEMA.toPhysicalRowDataType()); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); } private Map getAllOptions() { diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableSourceITCase.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableSourceITCase.java index 4f4822447b59f..f8de199c65b1a 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableSourceITCase.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableSourceITCase.java @@ -43,8 +43,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** ITCase for {@link JdbcDynamicTableSource}. */ public class JdbcDynamicTableSourceITCase extends AbstractTestBase { @@ -144,7 +143,7 @@ public void testJdbcSource() throws Exception { "+I[2, 2020-01-01T15:36:01.123456, 2020-01-01T15:36:01.123456789, 15:36:01, -1.175E-37, -1.79769E308, 101.1234]") .sorted() .collect(Collectors.toList()); - assertEquals(expected, result); + assertThat(result).isEqualTo(expected); } @Test @@ -188,7 +187,7 @@ public void testProject() throws Exception { "+I[2, 2020-01-01T15:36:01.123456, 101.1234]") .sorted() .collect(Collectors.toList()); - assertEquals(expected, result); + assertThat(result).isEqualTo(expected); } @Test @@ -231,9 +230,9 @@ public void testLimit() throws Exception { "+I[1, 2020-01-01T15:35:00.123456, 2020-01-01T15:35:00.123456789, 15:35, 1.175E-37, 1.79769E308, 100.1234]"); expected.add( "+I[2, 2020-01-01T15:36:01.123456, 2020-01-01T15:36:01.123456789, 15:36:01, -1.175E-37, -1.79769E308, 101.1234]"); - assertEquals(1, result.size()); - assertTrue( - "The actual output is not a subset of the expected set.", - expected.containsAll(result)); + assertThat(result).hasSize(1); + assertThat(expected) + .as("The actual output is not a subset of the expected set.") + .containsAll(result); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcOutputFormatTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcOutputFormatTest.java index 7544808bfac27..9ecac711b658e 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcOutputFormatTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcOutputFormatTest.java @@ -52,12 +52,8 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.SELECT_ALL_NEWBOOKS_3; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TestEntry; -import static org.apache.flink.util.ExceptionUtils.findThrowable; -import static org.apache.flink.util.ExceptionUtils.findThrowableWithMessage; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static 
org.assertj.core.api.Assertions.assertThatThrownBy; /** Test suite for {@link JdbcOutputFormatBuilder}. */ public class JdbcOutputFormatTest extends JdbcDataTestBase { @@ -91,181 +87,190 @@ public void tearDown() throws Exception { @Test public void testInvalidDriver() { String expectedMsg = "unable to open JDBC writer"; - try { - JdbcConnectorOptions jdbcOptions = - JdbcConnectorOptions.builder() - .setDriverName("org.apache.derby.jdbc.idontexist") - .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) - .setTableName(INPUT_TABLE) - .build(); - JdbcDmlOptions dmlOptions = - JdbcDmlOptions.builder() - .withTableName(jdbcOptions.getTableName()) - .withDialect(jdbcOptions.getDialect()) - .withFieldNames(fieldNames) - .build(); - - outputFormat = - new JdbcOutputFormatBuilder() - .setJdbcOptions(jdbcOptions) - .setFieldDataTypes(fieldDataTypes) - .setJdbcDmlOptions(dmlOptions) - .setJdbcExecutionOptions(JdbcExecutionOptions.builder().build()) - .build(); - outputFormat.open(0, 1); - fail("Expected exception is not thrown."); - } catch (Exception e) { - assertTrue(findThrowable(e, IOException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); - } + assertThatThrownBy( + () -> { + JdbcConnectorOptions jdbcOptions = + JdbcConnectorOptions.builder() + .setDriverName("org.apache.derby.jdbc.idontexist") + .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) + .setTableName(INPUT_TABLE) + .build(); + JdbcDmlOptions dmlOptions = + JdbcDmlOptions.builder() + .withTableName(jdbcOptions.getTableName()) + .withDialect(jdbcOptions.getDialect()) + .withFieldNames(fieldNames) + .build(); + + outputFormat = + new JdbcOutputFormatBuilder() + .setJdbcOptions(jdbcOptions) + .setFieldDataTypes(fieldDataTypes) + .setJdbcDmlOptions(dmlOptions) + .setJdbcExecutionOptions( + JdbcExecutionOptions.builder().build()) + .build(); + outputFormat.open(0, 1); + }) + .hasMessage(expectedMsg); } @Test public void testInvalidURL() { - try { - JdbcConnectorOptions jdbcOptions = - JdbcConnectorOptions.builder() - .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) - .setDBUrl("jdbc:der:iamanerror:mory:ebookshop") - .setTableName(INPUT_TABLE) - .build(); - JdbcDmlOptions dmlOptions = - JdbcDmlOptions.builder() - .withTableName(jdbcOptions.getTableName()) - .withDialect(jdbcOptions.getDialect()) - .withFieldNames(fieldNames) - .build(); - - outputFormat = - new JdbcOutputFormatBuilder() - .setJdbcOptions(jdbcOptions) - .setFieldDataTypes(fieldDataTypes) - .setJdbcDmlOptions(dmlOptions) - .setJdbcExecutionOptions(JdbcExecutionOptions.builder().build()) - .build(); - outputFormat.open(0, 1); - fail("Expected exception is not thrown."); - } catch (Exception e) { - assertTrue(findThrowable(e, IllegalStateException.class).isPresent()); - } + assertThatThrownBy( + () -> { + JdbcConnectorOptions jdbcOptions = + JdbcConnectorOptions.builder() + .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) + .setDBUrl("jdbc:der:iamanerror:mory:ebookshop") + .setTableName(INPUT_TABLE) + .build(); + JdbcDmlOptions dmlOptions = + JdbcDmlOptions.builder() + .withTableName(jdbcOptions.getTableName()) + .withDialect(jdbcOptions.getDialect()) + .withFieldNames(fieldNames) + .build(); + + outputFormat = + new JdbcOutputFormatBuilder() + .setJdbcOptions(jdbcOptions) + .setFieldDataTypes(fieldDataTypes) + .setJdbcDmlOptions(dmlOptions) + .setJdbcExecutionOptions( + JdbcExecutionOptions.builder().build()) + .build(); + outputFormat.open(0, 1); + }) + .isInstanceOf(IllegalStateException.class); } @Test public void testIncompatibleTypes() { - try { - 
JdbcConnectorOptions jdbcOptions = - JdbcConnectorOptions.builder() - .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) - .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) - .setTableName(INPUT_TABLE) - .build(); - JdbcDmlOptions dmlOptions = - JdbcDmlOptions.builder() - .withTableName(jdbcOptions.getTableName()) - .withDialect(jdbcOptions.getDialect()) - .withFieldNames(fieldNames) - .build(); - - outputFormat = - new JdbcOutputFormatBuilder() - .setJdbcOptions(jdbcOptions) - .setFieldDataTypes(fieldDataTypes) - .setJdbcDmlOptions(dmlOptions) - .setJdbcExecutionOptions(JdbcExecutionOptions.builder().build()) - .setRowDataTypeInfo(rowDataTypeInfo) - .build(); - - setRuntimeContext(outputFormat, false); - outputFormat.open(0, 1); - - RowData row = buildGenericData(4, "hello", "world", 0.99, "imthewrongtype"); - outputFormat.writeRecord(row); - outputFormat.close(); - fail("Expected exception is not thrown."); - } catch (Exception e) { - assertTrue(findThrowable(e, ClassCastException.class).isPresent()); - } + assertThatThrownBy( + () -> { + JdbcConnectorOptions jdbcOptions = + JdbcConnectorOptions.builder() + .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) + .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) + .setTableName(INPUT_TABLE) + .build(); + JdbcDmlOptions dmlOptions = + JdbcDmlOptions.builder() + .withTableName(jdbcOptions.getTableName()) + .withDialect(jdbcOptions.getDialect()) + .withFieldNames(fieldNames) + .build(); + + outputFormat = + new JdbcOutputFormatBuilder() + .setJdbcOptions(jdbcOptions) + .setFieldDataTypes(fieldDataTypes) + .setJdbcDmlOptions(dmlOptions) + .setJdbcExecutionOptions( + JdbcExecutionOptions.builder().build()) + .setRowDataTypeInfo(rowDataTypeInfo) + .build(); + + setRuntimeContext(outputFormat, false); + outputFormat.open(0, 1); + + RowData row = + buildGenericData(4, "hello", "world", 0.99, "imthewrongtype"); + outputFormat.writeRecord(row); + outputFormat.close(); + }) + .getRootCause() + .isInstanceOf(ClassCastException.class); } @Test public void testExceptionOnInvalidType() { - try { - JdbcConnectorOptions jdbcOptions = - JdbcConnectorOptions.builder() - .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) - .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) - .setTableName(OUTPUT_TABLE) - .build(); - JdbcDmlOptions dmlOptions = - JdbcDmlOptions.builder() - .withTableName(jdbcOptions.getTableName()) - .withDialect(jdbcOptions.getDialect()) - .withFieldNames(fieldNames) - .build(); - - outputFormat = - new JdbcOutputFormatBuilder() - .setJdbcOptions(jdbcOptions) - .setFieldDataTypes(fieldDataTypes) - .setJdbcDmlOptions(dmlOptions) - .setJdbcExecutionOptions(JdbcExecutionOptions.builder().build()) - .setRowDataTypeInfo(rowDataTypeInfo) - .build(); - setRuntimeContext(outputFormat, false); - outputFormat.open(0, 1); - - TestEntry entry = TEST_DATA[0]; - RowData row = buildGenericData(entry.id, entry.title, entry.author, 0L, entry.qty); - outputFormat.writeRecord(row); - outputFormat.close(); - fail("Expected exception is not thrown."); - } catch (Exception e) { - assertTrue(findThrowable(e, ClassCastException.class).isPresent()); - } + assertThatThrownBy( + () -> { + JdbcConnectorOptions jdbcOptions = + JdbcConnectorOptions.builder() + .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) + .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) + .setTableName(OUTPUT_TABLE) + .build(); + JdbcDmlOptions dmlOptions = + JdbcDmlOptions.builder() + .withTableName(jdbcOptions.getTableName()) + .withDialect(jdbcOptions.getDialect()) + .withFieldNames(fieldNames) + .build(); + + outputFormat = + new 
JdbcOutputFormatBuilder() + .setJdbcOptions(jdbcOptions) + .setFieldDataTypes(fieldDataTypes) + .setJdbcDmlOptions(dmlOptions) + .setJdbcExecutionOptions( + JdbcExecutionOptions.builder().build()) + .setRowDataTypeInfo(rowDataTypeInfo) + .build(); + setRuntimeContext(outputFormat, false); + outputFormat.open(0, 1); + + TestEntry entry = TEST_DATA[0]; + RowData row = + buildGenericData( + entry.id, entry.title, entry.author, 0L, entry.qty); + outputFormat.writeRecord(row); + outputFormat.close(); + }) + .getRootCause() + .isInstanceOf(ClassCastException.class); } @Test public void testExceptionOnClose() { String expectedMsg = "Writing records to JDBC failed."; - try { - JdbcConnectorOptions jdbcOptions = - JdbcConnectorOptions.builder() - .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) - .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) - .setTableName(OUTPUT_TABLE) - .build(); - JdbcDmlOptions dmlOptions = - JdbcDmlOptions.builder() - .withTableName(jdbcOptions.getTableName()) - .withDialect(jdbcOptions.getDialect()) - .withFieldNames(fieldNames) - .build(); - - outputFormat = - new JdbcOutputFormatBuilder() - .setJdbcOptions(jdbcOptions) - .setFieldDataTypes(fieldDataTypes) - .setJdbcDmlOptions(dmlOptions) - .setJdbcExecutionOptions(JdbcExecutionOptions.builder().build()) - .setRowDataTypeInfo(rowDataTypeInfo) - .build(); - setRuntimeContext(outputFormat, true); - outputFormat.open(0, 1); - - TestEntry entry = TEST_DATA[0]; - RowData row = - buildGenericData(entry.id, entry.title, entry.author, entry.price, entry.qty); - - outputFormat.writeRecord(row); - outputFormat.writeRecord( - row); // writing the same record twice must yield a unique key violation. - outputFormat.close(); - - fail("Expected exception is not thrown."); - } catch (Exception e) { - assertTrue(findThrowable(e, RuntimeException.class).isPresent()); - assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent()); - } + assertThatThrownBy( + () -> { + JdbcConnectorOptions jdbcOptions = + JdbcConnectorOptions.builder() + .setDriverName(DERBY_EBOOKSHOP_DB.getDriverClass()) + .setDBUrl(DERBY_EBOOKSHOP_DB.getUrl()) + .setTableName(OUTPUT_TABLE) + .build(); + JdbcDmlOptions dmlOptions = + JdbcDmlOptions.builder() + .withTableName(jdbcOptions.getTableName()) + .withDialect(jdbcOptions.getDialect()) + .withFieldNames(fieldNames) + .build(); + + outputFormat = + new JdbcOutputFormatBuilder() + .setJdbcOptions(jdbcOptions) + .setFieldDataTypes(fieldDataTypes) + .setJdbcDmlOptions(dmlOptions) + .setJdbcExecutionOptions( + JdbcExecutionOptions.builder().build()) + .setRowDataTypeInfo(rowDataTypeInfo) + .build(); + setRuntimeContext(outputFormat, true); + outputFormat.open(0, 1); + + TestEntry entry = TEST_DATA[0]; + RowData row = + buildGenericData( + entry.id, + entry.title, + entry.author, + entry.price, + entry.qty); + + outputFormat.writeRecord(row); + outputFormat.writeRecord( + row); // writing the same record twice must yield a unique key + // violation. 
+ outputFormat.close(); + }) + .isInstanceOf(RuntimeException.class) + .hasMessage(expectedMsg); } @Test @@ -309,15 +314,15 @@ public void testJdbcOutputFormat() throws IOException, SQLException { ResultSet resultSet = statement.executeQuery()) { int recordCount = 0; while (resultSet.next()) { - assertEquals(TEST_DATA[recordCount].id, resultSet.getObject("id")); - assertEquals(TEST_DATA[recordCount].title, resultSet.getObject("title")); - assertEquals(TEST_DATA[recordCount].author, resultSet.getObject("author")); - assertEquals(TEST_DATA[recordCount].price, resultSet.getObject("price")); - assertEquals(TEST_DATA[recordCount].qty, resultSet.getObject("qty")); + assertThat(resultSet.getObject("id")).isEqualTo(TEST_DATA[recordCount].id); + assertThat(resultSet.getObject("title")).isEqualTo(TEST_DATA[recordCount].title); + assertThat(resultSet.getObject("author")).isEqualTo(TEST_DATA[recordCount].author); + assertThat(resultSet.getObject("price")).isEqualTo(TEST_DATA[recordCount].price); + assertThat(resultSet.getObject("qty")).isEqualTo(TEST_DATA[recordCount].qty); recordCount++; } - assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } } @@ -362,7 +367,7 @@ public void testFlush() throws SQLException, IOException { TEST_DATA[i].qty)); } try (ResultSet resultSet = statement.executeQuery()) { - assertFalse(resultSet.next()); + assertThat(resultSet.next()).isFalse(); } outputFormat.writeRecord( buildGenericData( @@ -374,14 +379,17 @@ public void testFlush() throws SQLException, IOException { try (ResultSet resultSet = statement.executeQuery()) { int recordCount = 0; while (resultSet.next()) { - assertEquals(TEST_DATA[recordCount].id, resultSet.getObject("id")); - assertEquals(TEST_DATA[recordCount].title, resultSet.getObject("title")); - assertEquals(TEST_DATA[recordCount].author, resultSet.getObject("author")); - assertEquals(TEST_DATA[recordCount].price, resultSet.getObject("price")); - assertEquals(TEST_DATA[recordCount].qty, resultSet.getObject("qty")); + assertThat(resultSet.getObject("id")).isEqualTo(TEST_DATA[recordCount].id); + assertThat(resultSet.getObject("title")) + .isEqualTo(TEST_DATA[recordCount].title); + assertThat(resultSet.getObject("author")) + .isEqualTo(TEST_DATA[recordCount].author); + assertThat(resultSet.getObject("price")) + .isEqualTo(TEST_DATA[recordCount].price); + assertThat(resultSet.getObject("qty")).isEqualTo(TEST_DATA[recordCount].qty); recordCount++; } - assertEquals(3, recordCount); + assertThat(recordCount).isEqualTo(3); } } finally { outputFormat.close(); @@ -428,7 +436,7 @@ public void testFlushWithBatchSizeEqualsZero() throws SQLException, IOException TEST_DATA[i].qty)); } try (ResultSet resultSet = statement.executeQuery()) { - assertFalse(resultSet.next()); + assertThat(resultSet.next()).isFalse(); } } finally { outputFormat.close(); @@ -485,15 +493,15 @@ public void testInvalidConnectionInJdbcOutputFormat() throws IOException, SQLExc ResultSet resultSet = statement.executeQuery()) { int recordCount = 0; while (resultSet.next()) { - assertEquals(TEST_DATA[recordCount].id, resultSet.getObject("id")); - assertEquals(TEST_DATA[recordCount].title, resultSet.getObject("title")); - assertEquals(TEST_DATA[recordCount].author, resultSet.getObject("author")); - assertEquals(TEST_DATA[recordCount].price, resultSet.getObject("price")); - assertEquals(TEST_DATA[recordCount].qty, resultSet.getObject("qty")); + assertThat(resultSet.getObject("id")).isEqualTo(TEST_DATA[recordCount].id); + 
assertThat(resultSet.getObject("title")).isEqualTo(TEST_DATA[recordCount].title); + assertThat(resultSet.getObject("author")).isEqualTo(TEST_DATA[recordCount].author); + assertThat(resultSet.getObject("price")).isEqualTo(TEST_DATA[recordCount].price); + assertThat(resultSet.getObject("qty")).isEqualTo(TEST_DATA[recordCount].qty); recordCount++; } - assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataInputFormatTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataInputFormatTest.java index a574006d1c16c..65d49cf371c3f 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataInputFormatTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataInputFormatTest.java @@ -35,7 +35,6 @@ import org.apache.flink.table.types.logical.RowType; import org.junit.After; -import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -52,6 +51,7 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.SELECT_ALL_BOOKS_SPLIT_BY_ID; import static org.apache.flink.connector.jdbc.JdbcTestFixture.SELECT_EMPTY; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; +import static org.assertj.core.api.Assertions.assertThat; /** Test suite for {@link JdbcRowDataInputFormat}. */ public class JdbcRowDataInputFormatTest extends JdbcDataTestBase { @@ -197,7 +197,7 @@ public void testJdbcInputFormatWithoutParallelism() throws IOException { .setRowConverter(dialect.getRowConverter(rowType)) .build(); // this query does not exploit parallelism - Assert.assertEquals(1, inputFormat.createInputSplits(1).length); + assertThat(inputFormat.createInputSplits(1)).hasSize(1); inputFormat.openInputFormat(); inputFormat.open(null); RowData row = new GenericRowData(5); @@ -211,7 +211,7 @@ public void testJdbcInputFormatWithoutParallelism() throws IOException { } inputFormat.close(); inputFormat.closeInputFormat(); - Assert.assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } @Test @@ -234,7 +234,7 @@ public void testJdbcInputFormatWithParallelismAndNumericColumnSplitting() throws inputFormat.openInputFormat(); InputSplit[] splits = inputFormat.createInputSplits(1); // this query exploit parallelism (1 split for every id) - Assert.assertEquals(TEST_DATA.length, splits.length); + assertThat(splits).hasSize(TEST_DATA.length); int recordCount = 0; RowData row = new GenericRowData(5); for (InputSplit split : splits) { @@ -249,7 +249,7 @@ public void testJdbcInputFormatWithParallelismAndNumericColumnSplitting() throws inputFormat.close(); } inputFormat.closeInputFormat(); - Assert.assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } @Test @@ -273,7 +273,7 @@ public void testJdbcInputFormatWithoutParallelismAndNumericColumnSplitting() inputFormat.openInputFormat(); InputSplit[] splits = inputFormat.createInputSplits(1); // assert that a single split was generated - Assert.assertEquals(1, splits.length); + assertThat(splits).hasSize(1); int recordCount = 0; RowData row = new GenericRowData(5); for (InputSplit split : splits) { @@ -288,7 +288,7 @@ public void testJdbcInputFormatWithoutParallelismAndNumericColumnSplitting() 
inputFormat.close(); } inputFormat.closeInputFormat(); - Assert.assertEquals(TEST_DATA.length, recordCount); + assertThat(recordCount).isEqualTo(TEST_DATA.length); } @Test @@ -311,7 +311,7 @@ public void testJdbcInputFormatWithParallelismAndGenericSplitting() throws IOExc inputFormat.openInputFormat(); InputSplit[] splits = inputFormat.createInputSplits(1); // this query exploit parallelism (1 split for every queryParameters row) - Assert.assertEquals(queryParameters.length, splits.length); + assertThat(splits).hasSize(queryParameters.length); verifySplit(splits[0], TEST_DATA[3].id); verifySplit(splits[1], TEST_DATA[0].id + TEST_DATA[1].id); @@ -335,7 +335,7 @@ private void verifySplit(InputSplit split, int expectedIDSum) throws IOException sum += id; } - Assert.assertEquals(expectedIDSum, sum); + assertThat(sum).isEqualTo(expectedIDSum); } @Test @@ -352,7 +352,7 @@ public void testEmptyResults() throws IOException { try { inputFormat.openInputFormat(); inputFormat.open(null); - Assert.assertTrue(inputFormat.reachedEnd()); + assertThat(inputFormat.reachedEnd()).isTrue(); } finally { inputFormat.close(); inputFormat.closeInputFormat(); @@ -360,15 +360,15 @@ public void testEmptyResults() throws IOException { } private static void assertEquals(JdbcTestFixture.TestEntry expected, RowData actual) { - Assert.assertEquals( - expected.id, actual.isNullAt(0) ? null : Integer.valueOf(actual.getInt(0))); - Assert.assertEquals( - expected.title, actual.isNullAt(1) ? null : actual.getString(1).toString()); - Assert.assertEquals( - expected.author, actual.isNullAt(2) ? null : actual.getString(2).toString()); - Assert.assertEquals( - expected.price, actual.isNullAt(3) ? null : Double.valueOf(actual.getDouble(3))); - Assert.assertEquals( - expected.qty, actual.isNullAt(4) ? null : Integer.valueOf(actual.getInt(4))); + assertThat(actual.isNullAt(0) ? null : Integer.valueOf(actual.getInt(0))) + .isEqualTo(expected.id); + assertThat(actual.isNullAt(1) ? null : actual.getString(1).toString()) + .isEqualTo(expected.title); + assertThat(actual.isNullAt(2) ? null : actual.getString(2).toString()) + .isEqualTo(expected.author); + assertThat(actual.isNullAt(3) ? null : Double.valueOf(actual.getDouble(3))) + .isEqualTo(expected.price); + assertThat(actual.isNullAt(4) ? null : Integer.valueOf(actual.getInt(4))) + .isEqualTo(expected.qty); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataLookupFunctionTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataLookupFunctionTest.java index c7546b5cb73b9..ea6df62d1a371 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataLookupFunctionTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcRowDataLookupFunctionTest.java @@ -40,7 +40,7 @@ import java.util.stream.Collectors; import static org.apache.flink.connector.jdbc.JdbcTestFixture.DERBY_EBOOKSHOP_DB; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test suite for {@link JdbcRowDataLookupFunction}. 
*/ public class JdbcRowDataLookupFunctionTest extends JdbcLookupTestBase { @@ -81,7 +81,7 @@ public void testEval() throws Exception { expected.add("+I(2,3,null,23-c2)"); Collections.sort(expected); - assertEquals(expected, result); + assertThat(result).isEqualTo(expected); } @Test @@ -107,7 +107,7 @@ public void testEvalWithCacheMissingKeyPositive() throws Exception { Cache<RowData, List<RowData>> cache = lookupFunction.getCache(); // empty data should cache - assertEquals(cache.getIfPresent(keyRow), Collections.emptyList()); + assertThat(cache.getIfPresent(keyRow)).isEqualTo(Collections.emptyList()); // put db entry for keyRow // final cache output should also be empty till TTL expires @@ -117,7 +117,7 @@ public void testEvalWithCacheMissingKeyPositive() throws Exception { + " (id1, id2, comment1, comment2) VALUES (4, '9', '49-c1', '49-c2')"); lookupFunction.eval(4, StringData.fromString("9")); - assertEquals(cache.getIfPresent(keyRow), Collections.emptyList()); + assertThat(cache.getIfPresent(keyRow)).isEqualTo(Collections.emptyList()); } @Test @@ -143,7 +143,7 @@ public void testEvalWithCacheMissingKeyNegative() throws Exception { Cache<RowData, List<RowData>> cache = lookupFunction.getCache(); // empty data should not get cached - assert cache.getIfPresent(keyRow) == null; + assertThat(cache.getIfPresent(keyRow)).isNull(); // put db entry for keyRow // final cache output should contain data @@ -160,7 +160,7 @@ public void testEvalWithCacheMissingKeyNegative() throws Exception { StringData.fromString("1"), StringData.fromString("51-c1"), StringData.fromString("51-c2"))); - assertEquals(cache.getIfPresent(keyRow), expectedOutput); + assertThat(cache.getIfPresent(keyRow)).isEqualTo(expectedOutput); } private JdbcRowDataLookupFunction buildRowDataLookupFunction(JdbcLookupOptions lookupOptions) { diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/utils/JdbcTypeUtilTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/utils/JdbcTypeUtilTest.java index ae967b0e7d4c1..cf5ef28e1e9e9 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/utils/JdbcTypeUtilTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/utils/JdbcTypeUtilTest.java @@ -25,24 +25,21 @@ import java.sql.Types; import static org.apache.flink.connector.jdbc.utils.JdbcTypeUtil.logicalTypeToSqlType; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Testing the type conversions from Flink to SQL types.
*/ public class JdbcTypeUtilTest { @Test public void testTypeConversions() { - assertEquals(Types.INTEGER, logicalTypeToSqlType(LogicalTypeRoot.INTEGER)); + assertThat(logicalTypeToSqlType(LogicalTypeRoot.INTEGER)).isEqualTo(Types.INTEGER); testUnsupportedType(LogicalTypeRoot.RAW); testUnsupportedType(LogicalTypeRoot.MAP); } private static void testUnsupportedType(LogicalTypeRoot logicalTypeRoot) { - try { - logicalTypeToSqlType(logicalTypeRoot); - fail(); - } catch (IllegalArgumentException ignored) { - } + assertThatThrownBy(() -> logicalTypeToSqlType(logicalTypeRoot)) + .isInstanceOf(IllegalArgumentException.class); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcExactlyOnceSinkE2eTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcExactlyOnceSinkE2eTest.java index 698f7ab6aa1c9..3b24f82d5aa7e 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcExactlyOnceSinkE2eTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcExactlyOnceSinkE2eTest.java @@ -91,7 +91,7 @@ import static org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions.CHECKPOINTING_TIMEOUT; import static org.apache.flink.util.Preconditions.checkArgument; import static org.apache.flink.util.Preconditions.checkState; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.slf4j.event.Level.TRACE; /** A simple end-to-end test for {@link JdbcXaSinkFunction}. */ @@ -233,9 +233,9 @@ public void testInsert() throws Exception { IntStream.range(0, elementsPerSource * dbEnv.getParallelism()) .boxed() .collect(Collectors.toList()); - assertTrue( - insertedIds.toString(), - insertedIds.size() == expectedIds.size() && expectedIds.containsAll(insertedIds)); + assertThat(insertedIds) + .as(insertedIds.toString()) + .containsExactlyInAnyOrderElementsOf(expectedIds); LOG.info( "Test insert for {} finished in {} ms.", dbEnv, diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeImplTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeImplTest.java index 773ca3eeb6978..18e05e2e6a11f 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeImplTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeImplTest.java @@ -31,7 +31,7 @@ import java.sql.Statement; import java.util.Collection; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -61,7 +61,7 @@ public byte[] getBranchQualifier() { public void testRecover() throws Exception { try (XaFacade f = XaFacadeImpl.fromXaDataSource(getDbMetadata().buildXaDataSource())) { f.open(); - assertEquals(0, f.recover().size()); + assertThat(f.recover()).isEmpty(); f.start(XID); // insert some data to prevent database from ignoring the transaction try (Connection c = f.getConnection()) { @@ -75,7 +75,7 @@ public void testRecover() throws Exception { f.open(); Collection recovered = f.recover(); recovered.forEach(f::rollback); - assertEquals(1, recovered.size()); + assertThat(recovered).hasSize(1); } } diff --git 
a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeTestHelper.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeTestHelper.java index 91cf0d48c434d..1372683448776 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeTestHelper.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaFacadeTestHelper.java @@ -33,7 +33,7 @@ import java.util.stream.IntStream; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; class JdbcXaFacadeTestHelper implements AutoCloseable { private final String table; @@ -54,7 +54,7 @@ class JdbcXaFacadeTestHelper implements AutoCloseable { } void assertPreparedTxCountEquals(int expected) { - assertEquals(expected, xaFacade.recover().size()); + assertThat(xaFacade.recover()).hasSize(expected); } void assertDbContentsEquals(JdbcTestCheckpoint... checkpoints) throws SQLException { @@ -68,7 +68,7 @@ void assertDbContentsEquals(IntStream dataIdxStream) throws SQLException { } void assertDbContentsEquals(List expected) throws SQLException { - assertEquals(expected, getInsertedIds()); + assertThat(getInsertedIds()).isEqualTo(expected); } private List getInsertedIds() throws SQLException { diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java index 1fcda0b1b03ee..b788565419409 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java @@ -24,8 +24,8 @@ import org.junit.Test; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** * {@link JdbcXaSinkFunction} tests using Derby DB. Derby supports XA but doesn't use MVCC, so we @@ -69,7 +69,7 @@ public void testTxRollbackOnStartup() throws Exception { xaHelper.assertPreparedTxCountEquals(1); // tx should still be there buildAndInit(); // should cleanup on startup xaHelper.assertPreparedTxCountEquals(0); - assertEquals(0, xaHelper.countInDb()); + assertThat(xaHelper.countInDb()).isEqualTo(0); } @Test diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkH2Test.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkH2Test.java index e4057d37f8637..c86902676df96 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkH2Test.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkH2Test.java @@ -23,7 +23,7 @@ import org.junit.Test; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** * {@link JdbcXaSinkFunction} tests using H2 DB. H2 uses MVCC (so we can e.g. 
count records while @@ -47,22 +47,19 @@ public void testEmptyCheckpoint() throws Exception { @Test public void testHappyFlow() throws Exception { sinkHelper.emit(TEST_DATA[0]); - assertEquals( - "record should not be inserted before the checkpoint started.", - 0, - xaHelper.countInDb()); + assertThat(xaHelper.countInDb()) + .as("record should not be inserted before the checkpoint started.") + .isEqualTo(0); sinkHelper.snapshotState(Long.MAX_VALUE); - assertEquals( - "record should not be inserted before the checkpoint completed.", - 0, - xaHelper.countInDb()); + assertThat(xaHelper.countInDb()) + .as("record should not be inserted before the checkpoint completed.") + .isEqualTo(0); sinkHelper.notifyCheckpointComplete(Long.MAX_VALUE); - assertEquals( - "record should be inserted after the checkpoint completed.", - 1, - xaHelper.countInDb()); + assertThat(xaHelper.countInDb()) + .as("record should be inserted after the checkpoint completed.") + .isEqualTo(1); } @Test @@ -70,7 +67,7 @@ public void testTwoCheckpointsWithoutData() throws Exception { JdbcXaSinkTestHelper sinkHelper = this.sinkHelper; sinkHelper.snapshotState(1); sinkHelper.snapshotState(2); - assertEquals(0, xaHelper.countInDb()); + assertThat(xaHelper.countInDb()).isEqualTo(0); } @Override diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkNoInsertionTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkNoInsertionTest.java index d1a4dd4072137..4baf319e6d72b 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkNoInsertionTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkNoInsertionTest.java @@ -23,7 +23,7 @@ import org.junit.Test; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests that data is not inserted ahead of time. 
*/ public class JdbcXaSinkNoInsertionTest extends JdbcXaSinkTestBase { @@ -31,26 +31,24 @@ public class JdbcXaSinkNoInsertionTest extends JdbcXaSinkTestBase { @Test public void testNoInsertAfterInvoke() throws Exception { sinkHelper.emit(TEST_DATA[0]); - assertEquals( - "no records should be inserted for incomplete checkpoints.", - 0, - xaHelper.countInDb()); + assertThat(xaHelper.countInDb()) + .as("no records should be inserted for incomplete checkpoints.") + .isEqualTo(0); } @Test public void testNoInsertAfterSnapshot() throws Exception { sinkHelper.emitAndSnapshot(JdbcTestFixture.CP0); - assertEquals( - "no records should be inserted for incomplete checkpoints.", - 0, - xaHelper.countInDb()); + assertThat(xaHelper.countInDb()) + .as("no records should be inserted for incomplete checkpoints.") + .isEqualTo(0); } @Test public void testNoInsertAfterSinkClose() throws Exception { sinkHelper.emitAndSnapshot(JdbcTestFixture.CP0); sinkHelper.close(); - assertEquals(0, xaHelper.countInDb()); + assertThat(xaHelper.countInDb()).isEqualTo(0); } @Test @@ -61,7 +59,7 @@ public void testNoInsertAfterFacadeClose() throws Exception { buildAndInit(0, xaFacade), new TestXaSinkStateHandler()); sinkHelper.emitAndSnapshot(JdbcTestFixture.CP0); } - assertEquals(0, xaHelper.countInDb()); + assertThat(xaHelper.countInDb()).isEqualTo(0); } @Override diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/SemanticXidGeneratorTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/SemanticXidGeneratorTest.java index bfda5d8d6e5c9..63fe55c0887ba 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/SemanticXidGeneratorTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/SemanticXidGeneratorTest.java @@ -27,8 +27,8 @@ import java.util.Set; import java.util.function.Function; -import static junit.framework.TestCase.assertEquals; import static org.apache.flink.connector.jdbc.xa.JdbcXaSinkTestBase.TEST_RUNTIME_CONTEXT; +import static org.assertj.core.api.Assertions.assertThat; /** Simple uniqueness tests for the {@link SemanticXidGenerator}. */ public class SemanticXidGeneratorTest { @@ -59,6 +59,6 @@ private void checkUniqueness(Function generate) { // We "drop" the branch id because uniqueness of gtrid is important generated.add(new XidImpl(0, generate.apply(i).getGlobalTransactionId(), new byte[0])); } - assertEquals(COUNT, generated.size()); + assertThat(generated).hasSize(COUNT); } } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/XidImplTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/XidImplTest.java index e391bfc0c37a0..985c004c257b3 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/XidImplTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/XidImplTest.java @@ -23,8 +23,7 @@ import static javax.transaction.xa.Xid.MAXBQUALSIZE; import static javax.transaction.xa.Xid.MAXGTRIDSIZE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link XidImpl}. 
*/ public class XidImplTest { @@ -35,24 +34,26 @@ public void testXidsEqual() { XidImpl other = new XidImpl( XID.getFormatId(), XID.getGlobalTransactionId(), XID.getBranchQualifier()); - assertEquals(XID, other); - assertEquals(XID.hashCode(), other.hashCode()); + assertThat(other).isEqualTo(XID); + assertThat(other.hashCode()).isEqualTo(XID.hashCode()); } @Test public void testXidsNotEqual() { - assertNotEquals( - XID, new XidImpl(0, XID.getGlobalTransactionId(), XID.getBranchQualifier())); - assertNotEquals( - XID, - new XidImpl( - XID.getFormatId(), randomBytes(MAXGTRIDSIZE), XID.getBranchQualifier())); - assertNotEquals( - XID, - new XidImpl( - XID.getFormatId(), - XID.getGlobalTransactionId(), - randomBytes(MAXBQUALSIZE))); + assertThat(new XidImpl(0, XID.getGlobalTransactionId(), XID.getBranchQualifier())) + .isNotEqualTo(XID); + assertThat( + new XidImpl( + XID.getFormatId(), + randomBytes(MAXGTRIDSIZE), + XID.getBranchQualifier())) + .isNotEqualTo(XID); + assertThat( + new XidImpl( + XID.getFormatId(), + XID.getGlobalTransactionId(), + randomBytes(MAXBQUALSIZE))) + .isNotEqualTo(XID); } private static byte[] randomBytes(int size) { From b6f4636c5ee6a35315acab82d2ecb1260f936bfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 6 May 2022 11:10:40 +0200 Subject: [PATCH 06/16] [FLINK-27185][connector] Convert connector-kafka module to assertj Co-authored-by: slinkydeveloper --- .../sink/KafkaCommittableSerializerTest.java | 4 +- ...aRecordSerializationSchemaBuilderTest.java | 96 ++++---- .../kafka/sink/KafkaSinkBuilderTest.java | 17 +- .../connector/kafka/sink/KafkaSinkITCase.java | 95 +++---- .../kafka/sink/KafkaTransactionLogITCase.java | 38 +-- .../kafka/sink/KafkaWriterITCase.java | 4 +- .../sink/KafkaWriterStateSerializerTest.java | 4 +- .../kafka/sink/TransactionIdFactoryTest.java | 5 +- .../sink/TransactionToAbortCheckerTest.java | 9 +- .../kafka/source/KafkaSourceBuilderTest.java | 120 ++++----- .../kafka/source/KafkaSourceITCase.java | 21 +- .../enumerator/KafkaEnumeratorTest.java | 77 +++--- .../KafkaSourceEnumStateSerializerTest.java | 6 +- .../initializer/OffsetsInitializerTest.java | 40 +-- .../subscriber/KafkaSubscriberTest.java | 40 ++- .../metrics/KafkaSourceReaderMetricsTest.java | 33 ++- .../reader/KafkaPartitionSplitReaderTest.java | 68 +++-- .../source/reader/KafkaSourceReaderTest.java | 21 +- .../KafkaRecordDeserializationSchemaTest.java | 42 ++-- .../kafka/testutils/KafkaSourceTestEnv.java | 4 +- .../kafka/FlinkFixedPartitionerTest.java | 30 +-- .../FlinkKafkaConsumerBaseMigrationTest.java | 38 ++- .../kafka/FlinkKafkaConsumerBaseTest.java | 231 ++++++++--------- .../FlinkKafkaInternalProducerITCase.java | 19 +- .../kafka/FlinkKafkaProducerBaseTest.java | 59 +++-- .../kafka/FlinkKafkaProducerITCase.java | 49 ++-- .../kafka/FlinkKafkaProducerTest.java | 9 +- ...JSONKeyValueDeserializationSchemaTest.java | 31 +-- .../kafka/KafkaConsumerTestBase.java | 66 +++-- .../kafka/KafkaProducerTestBase.java | 8 +- .../kafka/KafkaShortRetentionTestBase.java | 66 ----- .../connectors/kafka/KafkaTestBase.java | 2 +- .../kafka/KafkaTestEnvironmentImpl.java | 2 +- .../kafka/internals/AbstractFetcherTest.java | 24 +- .../AbstractFetcherWatermarksTest.java | 110 ++++----- .../AbstractPartitionDiscovererTest.java | 173 ++++++------- .../internals/ClosableBlockingQueueTest.java | 232 +++++++++--------- .../internals/KafkaTopicPartitionTest.java | 15 +- .../internals/KafkaTopicsDescriptorTest.java | 5 +- 
.../kafka/shuffle/KafkaShuffleITCase.java | 65 +++-- .../table/KafkaConnectorOptionsUtilTest.java | 81 ++---- .../table/KafkaDynamicTableFactoryTest.java | 2 +- .../kafka/table/KafkaTableITCase.java | 18 +- .../kafka/table/KafkaTableTestUtils.java | 14 +- .../kafka/table/ReducingUpsertWriterTest.java | 16 +- .../UpsertKafkaDynamicTableFactoryTest.java | 46 ++-- .../kafka/table/UpsertKafkaTableITCase.java | 13 +- .../testutils/TestPartitionDiscoverer.java | 14 +- 48 files changed, 1014 insertions(+), 1168 deletions(-) diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java index bcf06e91b2160..9f1e8126e79b8 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaCommittableSerializerTest.java @@ -23,7 +23,7 @@ import java.io.IOException; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** * Tests for serializing and deserialzing {@link KafkaCommittable} with {@link @@ -39,6 +39,6 @@ public void testCommittableSerDe() throws IOException { final short epoch = 5; final KafkaCommittable committable = new KafkaCommittable(1L, epoch, transactionalId, null); final byte[] serialized = SERIALIZER.serialize(committable); - assertEquals(committable, SERIALIZER.deserialize(1, serialized)); + assertThat(SERIALIZER.deserialize(1, serialized)).isEqualTo(committable); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java index 2ed2c54a587cc..08de121d17168 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaRecordSerializationSchemaBuilderTest.java @@ -42,12 +42,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; -import static org.junit.jupiter.api.Assertions.assertArrayEquals; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for {@link KafkaRecordSerializationSchemaBuilder}. 
*/ public class KafkaRecordSerializationSchemaBuilderTest extends TestLogger { @@ -77,34 +73,33 @@ public void testDoNotAllowMultipleValueSerializer() { @Test public void testDoNotAllowMultipleTopicSelector() { - assertThrows( - IllegalStateException.class, - () -> - KafkaRecordSerializationSchema.builder() - .setTopicSelector(e -> DEFAULT_TOPIC) - .setTopic(DEFAULT_TOPIC)); - assertThrows( - IllegalStateException.class, - () -> - KafkaRecordSerializationSchema.builder() - .setTopic(DEFAULT_TOPIC) - .setTopicSelector(e -> DEFAULT_TOPIC)); + assertThatThrownBy( + () -> + KafkaRecordSerializationSchema.builder() + .setTopicSelector(e -> DEFAULT_TOPIC) + .setTopic(DEFAULT_TOPIC)) + .isInstanceOf(IllegalStateException.class); + assertThatThrownBy( + () -> + KafkaRecordSerializationSchema.builder() + .setTopic(DEFAULT_TOPIC) + .setTopicSelector(e -> DEFAULT_TOPIC)) + .isInstanceOf(IllegalStateException.class); } @Test public void testExpectTopicSelector() { - assertThrows( - IllegalStateException.class, - KafkaRecordSerializationSchema.builder() - .setValueSerializationSchema(new SimpleStringSchema()) - ::build); + assertThatThrownBy( + KafkaRecordSerializationSchema.builder() + .setValueSerializationSchema(new SimpleStringSchema()) + ::build) + .isInstanceOf(IllegalStateException.class); } @Test public void testExpectValueSerializer() { - assertThrows( - IllegalStateException.class, - KafkaRecordSerializationSchema.builder().setTopic(DEFAULT_TOPIC)::build); + assertThatThrownBy(KafkaRecordSerializationSchema.builder().setTopic(DEFAULT_TOPIC)::build) + .isInstanceOf(IllegalStateException.class); } @Test @@ -122,14 +117,14 @@ public void testSerializeRecordWithTopicSelector() { final KafkaRecordSerializationSchema schema = builder.setValueSerializationSchema(serializationSchema).build(); final ProducerRecord record = schema.serialize("a", null, null); - assertEquals("topic-a", record.topic()); - assertNull(record.key()); - assertArrayEquals(serializationSchema.serialize("a"), record.value()); + assertThat(record.topic()).isEqualTo("topic-a"); + assertThat(record.key()).isNull(); + assertThat(record.value()).isEqualTo(serializationSchema.serialize("a")); final ProducerRecord record2 = schema.serialize("b", null, null); - assertEquals("topic-b", record2.topic()); - assertNull(record2.key()); - assertArrayEquals(serializationSchema.serialize("b"), record2.value()); + assertThat(record2.topic()).isEqualTo("topic-b"); + assertThat(record2.key()).isNull(); + assertThat(record2.value()).isEqualTo(serializationSchema.serialize("b")); } @Test @@ -147,8 +142,8 @@ public void testSerializeRecordWithPartitioner() throws Exception { final KafkaRecordSerializationSchema.KafkaSinkContext sinkContext = new TestSinkContext(); schema.open(null, sinkContext); final ProducerRecord record = schema.serialize("a", sinkContext, null); - assertEquals(partition, record.partition()); - assertTrue(opened.get()); + assertThat(record.partition()).isEqualTo(partition); + assertThat(opened.get()).isTrue(); } @Test @@ -161,8 +156,8 @@ public void testSerializeRecordWithKey() { .setKeySerializationSchema(serializationSchema) .build(); final ProducerRecord record = schema.serialize("a", null, null); - assertArrayEquals(record.key(), serializationSchema.serialize("a")); - assertArrayEquals(record.value(), serializationSchema.serialize("a")); + assertThat(serializationSchema.serialize("a")).isEqualTo(record.key()); + assertThat(serializationSchema.serialize("a")).isEqualTo(record.value()); } @Test @@ -177,9 +172,9 @@ public void 
testKafkaKeySerializerWrapperWithoutConfigurable() throws Exception .setKafkaKeySerializer(SimpleStringSerializer.class, config) .build(); open(schema); - assertEquals(configuration, config); - assertTrue(isKeySerializer); - assertTrue(configurableConfiguration.isEmpty()); + assertThat(config).isEqualTo(configuration); + assertThat(isKeySerializer).isTrue(); + assertThat(configurableConfiguration).isEmpty(); } @Test @@ -191,9 +186,9 @@ public void testKafkaValueSerializerWrapperWithoutConfigurable() throws Exceptio .setKafkaValueSerializer(SimpleStringSerializer.class, config) .build(); open(schema); - assertEquals(configuration, config); - assertFalse(isKeySerializer); - assertTrue(configurableConfiguration.isEmpty()); + assertThat(config).isEqualTo(configuration); + assertThat(isKeySerializer).isFalse(); + assertThat(configurableConfiguration).isEmpty(); } @Test @@ -205,11 +200,11 @@ public void testSerializeRecordWithKafkaSerializer() throws Exception { .setKafkaValueSerializer(ConfigurableStringSerializer.class, config) .build(); open(schema); - assertEquals(configurableConfiguration, config); - assertTrue(configuration.isEmpty()); + assertThat(config).isEqualTo(configurableConfiguration); + assertThat(configuration).isEmpty(); final Deserializer deserializer = new StringDeserializer(); final ProducerRecord record = schema.serialize("a", null, null); - assertEquals("a", deserializer.deserialize(DEFAULT_TOPIC, record.value())); + assertThat(deserializer.deserialize(DEFAULT_TOPIC, record.value())).isEqualTo("a"); } @Test @@ -223,19 +218,19 @@ public void testSerializeRecordWithTimestamp() { .build(); final ProducerRecord recordWithTimestamp = schema.serialize("a", null, 100L); - assertEquals(100L, (long) recordWithTimestamp.timestamp()); + assertThat((long) recordWithTimestamp.timestamp()).isEqualTo(100L); final ProducerRecord recordWithTimestampZero = schema.serialize("a", null, 0L); - assertEquals(0L, (long) recordWithTimestampZero.timestamp()); + assertThat((long) recordWithTimestampZero.timestamp()).isEqualTo(0L); final ProducerRecord recordWithoutTimestamp = schema.serialize("a", null, null); - assertNull(recordWithoutTimestamp.timestamp()); + assertThat(recordWithoutTimestamp.timestamp()).isNull(); final ProducerRecord recordWithInvalidTimestamp = schema.serialize("a", null, -100L); - assertNull(recordWithInvalidTimestamp.timestamp()); + assertThat(recordWithInvalidTimestamp.timestamp()).isNull(); } private static void assertOnlyOneSerializerAllowed( @@ -255,7 +250,8 @@ private static void assertOnlyOneSerializerAllowed( KafkaRecordSerializationSchemaBuilder, KafkaRecordSerializationSchemaBuilder> updater : serializers) { - assertThrows(IllegalStateException.class, () -> updater.apply(builder)); + assertThatThrownBy(() -> updater.apply(builder)) + .isInstanceOf(IllegalStateException.class); } } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java index 2f1f1acac831b..9351bf6ddde8b 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkBuilderTest.java @@ -27,8 +27,7 @@ import java.util.Properties; import java.util.function.Consumer; -import static org.junit.Assert.assertEquals; -import static 
org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link KafkaSinkBuilder}. */ public class KafkaSinkBuilderTest extends TestLogger { @@ -46,13 +45,13 @@ public void testPropertyHandling() { validateProducerConfig( getBasicBuilder(), p -> { - Arrays.stream(DEFAULT_KEYS).forEach(k -> assertTrue(k, p.containsKey(k))); + Arrays.stream(DEFAULT_KEYS).forEach(k -> assertThat(p).containsKey(k)); }); validateProducerConfig( getBasicBuilder().setProperty("k1", "v1"), p -> { - Arrays.stream(DEFAULT_KEYS).forEach(k -> assertTrue(k, p.containsKey(k))); + Arrays.stream(DEFAULT_KEYS).forEach(k -> assertThat(p).containsKey(k)); p.containsKey("k1"); }); @@ -63,8 +62,8 @@ public void testPropertyHandling() { validateProducerConfig( getBasicBuilder().setKafkaProducerConfig(testConf), p -> { - Arrays.stream(DEFAULT_KEYS).forEach(k -> assertTrue(k, p.containsKey(k))); - testConf.forEach((k, v) -> assertEquals(v, p.get(k))); + Arrays.stream(DEFAULT_KEYS).forEach(k -> assertThat(p).containsKey(k)); + testConf.forEach((k, v) -> assertThat(p.get(k)).isEqualTo(v)); }); validateProducerConfig( @@ -73,9 +72,9 @@ public void testPropertyHandling() { .setKafkaProducerConfig(testConf) .setProperty("k2", "correct"), p -> { - Arrays.stream(DEFAULT_KEYS).forEach(k -> assertTrue(k, p.containsKey(k))); - assertEquals("v1", p.get("k1")); - assertEquals("correct", p.get("k2")); + Arrays.stream(DEFAULT_KEYS).forEach(k -> assertThat(p).containsKey(k)); + assertThat(p.get("k1")).isEqualTo("v1"); + assertThat(p.get("k2")).isEqualTo("correct"); }); } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java index 2ae414597a671..d7f59c91b578e 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java @@ -109,13 +109,10 @@ import static org.apache.flink.connector.kafka.testutils.KafkaUtil.createKafkaContainer; import static org.apache.flink.util.DockerImageVersions.KAFKA; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.hasItems; -import static org.hamcrest.MatcherAssert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.contains; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; /** Tests for using KafkaSink writing to a Kafka cluster. 
*/ public class KafkaSinkITCase extends TestLogger { @@ -223,8 +220,7 @@ public void testRecoveryWithAtLeastOnceGuarantee() throws Exception { testRecoveryWithAssertion( DeliveryGuarantee.AT_LEAST_ONCE, 1, - (records) -> - assertThat(records, hasItems(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L))); + (records) -> assertThat(records).contains(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L)); } @Test @@ -233,12 +229,18 @@ public void testRecoveryWithExactlyOnceGuarantee() throws Exception { DeliveryGuarantee.EXACTLY_ONCE, 1, (records) -> - assertThat( - records, - contains( - LongStream.range(1, lastCheckpointedRecord.get().get() + 1) - .boxed() - .toArray()))); + assertThat(records) + .satisfies( + matching( + contains( + LongStream.range( + 1, + lastCheckpointedRecord + .get() + .get() + + 1) + .boxed() + .toArray())))); } @Test @@ -247,12 +249,18 @@ public void testRecoveryWithExactlyOnceGuaranteeAndConcurrentCheckpoints() throw DeliveryGuarantee.EXACTLY_ONCE, 2, (records) -> - assertThat( - records, - contains( - LongStream.range(1, lastCheckpointedRecord.get().get() + 1) - .boxed() - .toArray()))); + assertThat(records) + .satisfies( + matching( + contains( + LongStream.range( + 1, + lastCheckpointedRecord + .get() + .get() + + 1) + .boxed() + .toArray())))); } @Test @@ -271,9 +279,8 @@ public void testAbortTransactionsOfPendingCheckpointsAfterFailure() throws Excep try { executeWithMapper(new FailAsyncCheckpointMapper(1), config, "firstPrefix"); } catch (Exception e) { - assertThat( - e.getCause().getCause().getMessage(), - containsString("Exceeded checkpoint tolerable failure")); + assertThat(e.getCause().getCause().getMessage()) + .contains("Exceeded checkpoint tolerable failure"); } final File completedCheckpoint = TestUtils.getMostRecentCompletedCheckpoint(checkpointDir); @@ -286,12 +293,13 @@ public void testAbortTransactionsOfPendingCheckpointsAfterFailure() throws Excep new FailingCheckpointMapper(failed, lastCheckpointedRecord), config, "newPrefix"); final List> collectedRecords = drainAllRecordsFromTopic(topic, true); - assertThat( - deserializeValues(collectedRecords), - contains( - LongStream.range(1, lastCheckpointedRecord.get().get() + 1) - .boxed() - .toArray())); + assertThat(deserializeValues(collectedRecords)) + .satisfies( + matching( + contains( + LongStream.range(1, lastCheckpointedRecord.get().get() + 1) + .boxed() + .toArray()))); } @Test @@ -302,11 +310,10 @@ public void testAbortTransactionsAfterScaleInBeforeFirstCheckpoint() throws Exce try { executeWithMapper(new FailAsyncCheckpointMapper(0), config, null); } catch (Exception e) { - assertThat( - e.getCause().getCause().getMessage(), - containsString("Exceeded checkpoint tolerable failure")); + assertThat(e.getCause().getCause().getMessage()) + .contains("Exceeded checkpoint tolerable failure"); } - assertTrue(deserializeValues(drainAllRecordsFromTopic(topic, true)).isEmpty()); + assertThat(deserializeValues(drainAllRecordsFromTopic(topic, true))).isEmpty(); // Second job aborts all transactions from previous runs with higher parallelism config.set(CoreOptions.DEFAULT_PARALLELISM, 1); @@ -315,12 +322,13 @@ public void testAbortTransactionsAfterScaleInBeforeFirstCheckpoint() throws Exce new FailingCheckpointMapper(failed, lastCheckpointedRecord), config, null); final List> collectedRecords = drainAllRecordsFromTopic(topic, true); - assertThat( - deserializeValues(collectedRecords), - contains( - LongStream.range(1, lastCheckpointedRecord.get().get() + 1) - .boxed() - .toArray())); + 
assertThat(deserializeValues(collectedRecords)) + .satisfies( + matching( + contains( + LongStream.range(1, lastCheckpointedRecord.get().get() + 1) + .boxed() + .toArray()))); } private void executeWithMapper( @@ -408,10 +416,11 @@ private void writeRecordsToKafka( drainAllRecordsFromTopic( topic, deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE); final long recordsCount = expectedRecords.get().get(); - assertEquals(collectedRecords.size(), recordsCount); - assertThat( - deserializeValues(collectedRecords), - contains(LongStream.range(1, recordsCount + 1).boxed().toArray())); + assertThat(recordsCount).isEqualTo(collectedRecords.size()); + assertThat(deserializeValues(collectedRecords)) + .satisfies( + matching( + contains(LongStream.range(1, recordsCount + 1).boxed().toArray()))); checkProducerLeak(); } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java index b20adb6375d13..7b0884e494fed 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java @@ -46,7 +46,8 @@ import static org.apache.flink.connector.kafka.sink.KafkaTransactionLog.TransactionState.PrepareCommit; import static org.apache.flink.connector.kafka.testutils.KafkaUtil.createKafkaContainer; import static org.apache.flink.util.DockerImageVersions.KAFKA; -import static org.hamcrest.MatcherAssert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.containsInAnyOrder; /** Tests for {@link KafkaTransactionLog} to retrieve abortable Kafka transactions. 
*/ @@ -77,21 +78,26 @@ public void testGetTransactions() { final KafkaTransactionLog transactionLog = new KafkaTransactionLog(getKafkaClientConfiguration()); final List transactions = transactionLog.getTransactions(); - assertThat( - transactions, - containsInAnyOrder( - new TransactionRecord(buildTransactionalId(1), Empty), - new TransactionRecord(buildTransactionalId(1), Ongoing), - new TransactionRecord(buildTransactionalId(1), PrepareCommit), - new TransactionRecord(buildTransactionalId(1), CompleteCommit), - new TransactionRecord(buildTransactionalId(2), Empty), - new TransactionRecord(buildTransactionalId(2), Ongoing), - new TransactionRecord(buildTransactionalId(2), PrepareAbort), - new TransactionRecord(buildTransactionalId(2), CompleteAbort), - new TransactionRecord(buildTransactionalId(3), Empty), - new TransactionRecord(buildTransactionalId(3), Ongoing), - new TransactionRecord(buildTransactionalId(4), Empty), - new TransactionRecord(buildTransactionalId(4), Ongoing))); + assertThat(transactions) + .satisfies( + matching( + containsInAnyOrder( + new TransactionRecord(buildTransactionalId(1), Empty), + new TransactionRecord(buildTransactionalId(1), Ongoing), + new TransactionRecord( + buildTransactionalId(1), PrepareCommit), + new TransactionRecord( + buildTransactionalId(1), CompleteCommit), + new TransactionRecord(buildTransactionalId(2), Empty), + new TransactionRecord(buildTransactionalId(2), Ongoing), + new TransactionRecord( + buildTransactionalId(2), PrepareAbort), + new TransactionRecord( + buildTransactionalId(2), CompleteAbort), + new TransactionRecord(buildTransactionalId(3), Empty), + new TransactionRecord(buildTransactionalId(3), Ongoing), + new TransactionRecord(buildTransactionalId(4), Empty), + new TransactionRecord(buildTransactionalId(4), Ongoing)))); } private void committedTransaction(long id) { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java index 8c7e50aa70403..77898403d6946 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterITCase.java @@ -43,7 +43,6 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.serialization.ByteArraySerializer; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -385,8 +384,7 @@ private void assertKafkaMetricNotPresent( config.put(configKey, configValue); try (final KafkaWriter ignored = createWriterWithConfiguration(config, guarantee)) { - Assertions.assertFalse( - metricListener.getGauge(KAFKA_METRIC_WITH_GROUP_NAME).isPresent()); + assertThat(metricListener.getGauge(KAFKA_METRIC_WITH_GROUP_NAME)).isNotPresent(); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java index afdd43001969e..3df0ea88c3f4e 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java +++ 
b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaWriterStateSerializerTest.java @@ -23,7 +23,7 @@ import java.io.IOException; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** * Tests for serializing and deserialzing {@link KafkaWriterState} with {@link @@ -37,6 +37,6 @@ public class KafkaWriterStateSerializerTest extends TestLogger { public void testStateSerDe() throws IOException { final KafkaWriterState state = new KafkaWriterState("idPrefix"); final byte[] serialized = SERIALIZER.serialize(state); - assertEquals(state, SERIALIZER.deserialize(1, serialized)); + assertThat(SERIALIZER.deserialize(1, serialized)).isEqualTo(state); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java index e54d0a3c6122d..982d58e333b1d 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionIdFactoryTest.java @@ -21,7 +21,7 @@ import org.junit.Test; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link TransactionalIdFactory}. */ public class TransactionIdFactoryTest extends TestLogger { @@ -29,6 +29,7 @@ public class TransactionIdFactoryTest extends TestLogger { @Test public void testBuildTransactionalId() { final String expected = "prefix-0-2"; - assertEquals(expected, TransactionalIdFactory.buildTransactionalId("prefix", 0, 2L)); + assertThat(TransactionalIdFactory.buildTransactionalId("prefix", 0, 2L)) + .isEqualTo(expected); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java index a502ad0a1d053..1b65c9d331ae6 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/TransactionToAbortCheckerTest.java @@ -26,8 +26,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link TransactionsToAbortChecker}. 
*/ public class TransactionToAbortCheckerTest extends TestLogger { @@ -47,7 +46,7 @@ public void testMustAbortTransactionsWithSameSubtaskIdAndHigherCheckpointOffset( 3, ImmutableMap.of(3L, "keep", 4L, "keep")); final List transactionsToAbort = checker.getTransactionsToAbort(openTransactions); - assertEquals(4, transactionsToAbort.size()); + assertThat(transactionsToAbort).hasSize(4); assertThatAbortCorrectTransaction(transactionsToAbort); } @@ -66,11 +65,11 @@ public void testMustAbortTransactionsIfLowestCheckpointOffsetIsMinimumOffset() { 5, ImmutableMap.of(1L, "keep")); final List transactionsToAbort = checker.getTransactionsToAbort(openTransactions); - assertEquals(4, transactionsToAbort.size()); + assertThat(transactionsToAbort).hasSize(4); assertThatAbortCorrectTransaction(transactionsToAbort); } private static void assertThatAbortCorrectTransaction(List abortedTransactions) { - assertTrue(abortedTransactions.stream().allMatch(t -> t.equals(ABORT))); + assertThat(abortedTransactions.stream().allMatch(t -> t.equals(ABORT))).isTrue(); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java index 9f4df0d91314b..26a13d3158f2f 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceBuilderTest.java @@ -26,16 +26,15 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.StringDeserializer; -import org.hamcrest.CoreMatchers; -import org.hamcrest.MatcherAssert; -import org.junit.Assert; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import java.util.HashMap; import java.util.Map; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + /** Tests for {@link KafkaSourceBuilder}. 
*/ @ExtendWith(TestLoggerExtension.class) public class KafkaSourceBuilderTest { @@ -44,69 +43,67 @@ public class KafkaSourceBuilderTest { public void testBuildSourceWithGroupId() { final KafkaSource kafkaSource = getBasicBuilder().setGroupId("groupId").build(); // Commit on checkpoint should be enabled by default - Assertions.assertTrue( - kafkaSource - .getConfiguration() - .get(KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT)); + assertThat( + kafkaSource + .getConfiguration() + .get(KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT)) + .isTrue(); // Auto commit should be disabled by default - Assertions.assertFalse( - kafkaSource - .getConfiguration() - .get( - ConfigOptions.key(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) - .booleanType() - .noDefaultValue())); + assertThat( + kafkaSource + .getConfiguration() + .get( + ConfigOptions.key(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) + .booleanType() + .noDefaultValue())) + .isFalse(); } @Test public void testBuildSourceWithoutGroupId() { final KafkaSource kafkaSource = getBasicBuilder().build(); // Commit on checkpoint and auto commit should be disabled because group.id is not specified - Assertions.assertFalse( - kafkaSource - .getConfiguration() - .get(KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT)); - Assertions.assertFalse( - kafkaSource - .getConfiguration() - .get( - ConfigOptions.key(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) - .booleanType() - .noDefaultValue())); + assertThat( + kafkaSource + .getConfiguration() + .get(KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT)) + .isFalse(); + assertThat( + kafkaSource + .getConfiguration() + .get( + ConfigOptions.key(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) + .booleanType() + .noDefaultValue())) + .isFalse(); } @Test public void testEnableCommitOnCheckpointWithoutGroupId() { - final IllegalStateException exception = - Assert.assertThrows( - IllegalStateException.class, + assertThatThrownBy( () -> getBasicBuilder() .setProperty( KafkaSourceOptions.COMMIT_OFFSETS_ON_CHECKPOINT .key(), "true") - .build()); - MatcherAssert.assertThat( - exception.getMessage(), - CoreMatchers.containsString( - "Property group.id is required when offset commit is enabled")); + .build()) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "Property group.id is required when offset commit is enabled"); } @Test public void testEnableAutoCommitWithoutGroupId() { - final IllegalStateException exception = - Assertions.assertThrows( - IllegalStateException.class, + assertThatThrownBy( () -> getBasicBuilder() .setProperty( ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") - .build()); - MatcherAssert.assertThat( - exception.getMessage(), - CoreMatchers.containsString( - "Property group.id is required when offset commit is enabled")); + .build()) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "Property group.id is required when offset commit is enabled"); } @Test @@ -120,35 +117,27 @@ public void testDisableOffsetCommitWithoutGroupId() { @Test public void testUsingCommittedOffsetsInitializerWithoutGroupId() { // Using OffsetsInitializer#committedOffsets as starting offsets - final IllegalStateException startingOffsetException = - Assertions.assertThrows( - IllegalStateException.class, + assertThatThrownBy( () -> getBasicBuilder() .setStartingOffsets(OffsetsInitializer.committedOffsets()) - .build()); - MatcherAssert.assertThat( - startingOffsetException.getMessage(), - CoreMatchers.containsString( - "Property group.id is required when using committed offset for offsets 
initializer")); + .build()) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "Property group.id is required when using committed offset for offsets initializer"); // Using OffsetsInitializer#committedOffsets as stopping offsets - final IllegalStateException stoppingOffsetException = - Assertions.assertThrows( - IllegalStateException.class, + assertThatThrownBy( () -> getBasicBuilder() .setBounded(OffsetsInitializer.committedOffsets()) - .build()); - MatcherAssert.assertThat( - stoppingOffsetException.getMessage(), - CoreMatchers.containsString( - "Property group.id is required when using committed offset for offsets initializer")); + .build()) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "Property group.id is required when using committed offset for offsets initializer"); // Using OffsetsInitializer#offsets to manually specify committed offset as starting offset - final IllegalStateException specificStartingOffsetException = - Assertions.assertThrows( - IllegalStateException.class, + assertThatThrownBy( () -> { final Map offsetMap = new HashMap<>(); offsetMap.put( @@ -157,11 +146,10 @@ public void testUsingCommittedOffsetsInitializerWithoutGroupId() { getBasicBuilder() .setStartingOffsets(OffsetsInitializer.offsets(offsetMap)) .build(); - }); - MatcherAssert.assertThat( - specificStartingOffsetException.getMessage(), - CoreMatchers.containsString( - "Property group.id is required because partition topic-0 is initialized with committed offset")); + }) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining( + "Property group.id is required because partition topic-0 is initialized with committed offset"); } private KafkaSourceBuilder getBasicBuilder() { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java index 72690961e6b73..8b8e7c82b0d79 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/KafkaSourceITCase.java @@ -81,7 +81,6 @@ import static org.apache.flink.connector.kafka.testutils.KafkaSourceExternalContext.SplitMappingMode.PARTITION; import static org.apache.flink.connector.kafka.testutils.KafkaSourceExternalContext.SplitMappingMode.TOPIC; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; /** Unite test class for {@link KafkaSource}. 
*/ public class KafkaSourceITCase { @@ -142,10 +141,9 @@ public void testTimestamp(boolean enableObjectReuse) throws Throwable { stream.addSink(new DiscardingSink<>()); JobExecutionResult result = env.execute(); - assertEquals( - Arrays.asList( - currentTimestamp + 1L, currentTimestamp + 2L, currentTimestamp + 3L), - result.getAccumulatorResult("timestamp")); + assertThat(result.>getAccumulatorResult("timestamp")) + .containsExactly( + currentTimestamp + 1L, currentTimestamp + 2L, currentTimestamp + 3L); } @ParameterizedTest(name = "Object reuse in deserializer = {arguments}") @@ -211,7 +209,7 @@ public void testValueOnlyDeserializer() throws Exception { // Since we have two topics, the expected sum value should be doubled expectedSum *= 2; - assertEquals(expectedSum, actualSum.get()); + assertThat(actualSum.get()).isEqualTo(expectedSum); } @ParameterizedTest(name = "Object reuse in deserializer = {arguments}") @@ -445,11 +443,12 @@ public void invoke(PartitionAndValue value, Context context) { (tp, values) -> { int firstExpectedValue = Integer.parseInt(tp.substring(tp.indexOf('-') + 1)); for (int i = 0; i < values.size(); i++) { - assertEquals( - firstExpectedValue + i, - (int) values.get(i), - String.format( - "The %d-th value for partition %s should be %d", i, tp, i)); + assertThat((int) values.get(i)) + .as( + String.format( + "The %d-th value for partition %s should be %d", + i, tp, i)) + .isEqualTo(firstExpectedValue + i); } }); } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java index 2a3200b71d658..ece1a4da9ae0d 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaEnumeratorTest.java @@ -51,9 +51,7 @@ import java.util.StringJoiner; import java.util.regex.Pattern; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Unit tests for {@link KafkaSourceEnumerator}. */ public class KafkaEnumeratorTest { @@ -96,11 +94,10 @@ public void testStartWithDiscoverPartitionsOnce() throws Exception { // Start the enumerator and it should schedule a one time task to discover and assign // partitions. enumerator.start(); - assertTrue(context.getPeriodicCallables().isEmpty()); - assertEquals( - "A one time partition discovery callable should have been scheduled", - 1, - context.getOneTimeCallables().size()); + assertThat(context.getPeriodicCallables()).isEmpty(); + assertThat(context.getOneTimeCallables()) + .as("A one time partition discovery callable should have been scheduled") + .hasSize(1); } } @@ -114,11 +111,10 @@ public void testStartWithPeriodicPartitionDiscovery() throws Exception { // Start the enumerator and it should schedule a one time task to discover and assign // partitions. 
enumerator.start(); - assertTrue(context.getOneTimeCallables().isEmpty()); - assertEquals( - "A periodic partition discovery callable should have been scheduled", - 1, - context.getPeriodicCallables().size()); + assertThat(context.getOneTimeCallables()).isEmpty(); + assertThat(context.getPeriodicCallables()) + .as("A periodic partition discovery callable should have been scheduled") + .hasSize(1); } } @@ -136,7 +132,7 @@ public void testDiscoverPartitionsTriggersAssignments() throws Throwable { // register reader 0. registerReader(context, enumerator, READER0); registerReader(context, enumerator, READER1); - assertTrue(context.getSplitsAssignmentSequence().isEmpty()); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); // Run the partition discover callable and check the partition assignment. runOneTimePartitionDiscovery(context); @@ -158,7 +154,7 @@ public void testReaderRegistrationTriggersAssignments() throws Throwable { // partitions. enumerator.start(); runOneTimePartitionDiscovery(context); - assertTrue(context.getSplitsAssignmentSequence().isEmpty()); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); registerReader(context, enumerator, READER0); verifyLastReadersAssignments( @@ -185,10 +181,9 @@ public void testDiscoverPartitionsPeriodically() throws Throwable { // invoke partition discovery callable again and there should be no new assignments. runPeriodicPartitionDiscovery(context); - assertEquals( - "No assignments should be made because there is no partition change", - 2, - context.getSplitsAssignmentSequence().size()); + assertThat(context.getSplitsAssignmentSequence()) + .as("No assignments should be made because there is no partition change") + .hasSize(2); // create the dynamic topic. adminClient @@ -238,10 +233,9 @@ public void testAddSplitsBack() throws Throwable { enumerator.addSplitsBack( context.getSplitsAssignmentSequence().get(0).assignment().get(READER0), READER0); - assertEquals( - "The added back splits should have not been assigned", - 2, - context.getSplitsAssignmentSequence().size()); + assertThat(context.getSplitsAssignmentSequence()) + .as("The added back splits should have not been assigned") + .hasSize(2); // Simulate a reader recovery. 
registerReader(context, enumerator, READER0); @@ -275,7 +269,7 @@ public void testWorkWithPreexistingAssignments() throws Throwable { runPeriodicPartitionDiscovery(context2); registerReader(context2, enumerator, READER0); - assertTrue(context2.getSplitsAssignmentSequence().isEmpty()); + assertThat(context2.getSplitsAssignmentSequence()).isEmpty(); registerReader(context2, enumerator, READER1); verifyLastReadersAssignments( @@ -304,16 +298,15 @@ public void testKafkaClientProperties() throws Exception { AdminClient adminClient = (AdminClient) Whitebox.getInternalState(enumerator, "adminClient"); - assertNotNull(adminClient); + assertThat(adminClient).isNotNull(); String clientId = (String) Whitebox.getInternalState(adminClient, "clientId"); - assertNotNull(clientId); - assertTrue(clientId.startsWith(clientIdPrefix)); - assertEquals( - defaultTimeoutMs, - Whitebox.getInternalState(adminClient, "defaultApiTimeoutMs")); - - assertNotNull(clientId); - assertTrue(clientId.startsWith(clientIdPrefix)); + assertThat(clientId).isNotNull(); + assertThat(clientId).startsWith(clientIdPrefix); + assertThat(Whitebox.getInternalState(adminClient, "defaultApiTimeoutMs")) + .isEqualTo(defaultTimeoutMs); + + assertThat(clientId).isNotNull(); + assertThat(clientId).startsWith(clientIdPrefix); } } @@ -326,7 +319,7 @@ public void testSnapshotState() throws Throwable { // No reader is registered, so the state should be empty final KafkaSourceEnumState state1 = enumerator.snapshotState(1L); - assertTrue(state1.assignedPartitions().isEmpty()); + assertThat(state1.assignedPartitions()).isEmpty(); registerReader(context, enumerator, READER0); registerReader(context, enumerator, READER1); @@ -373,8 +366,8 @@ public void testPartitionChangeChecking() throws Throwable { expectedRemovedPartitions.add(new TopicPartition(TOPIC2, i)); } - assertEquals(expectedNewPartitions, partitionChange.getNewPartitions()); - assertEquals(expectedRemovedPartitions, partitionChange.getRemovedPartitions()); + assertThat(partitionChange.getNewPartitions()).isEqualTo(expectedNewPartitions); + assertThat(partitionChange.getRemovedPartitions()).isEqualTo(expectedRemovedPartitions); } } @@ -390,7 +383,7 @@ private void startEnumeratorAndRegisterReaders( // register reader 0 before the partition discovery. registerReader(context, enumerator, READER0); - assertTrue(context.getSplitsAssignmentSequence().isEmpty()); + assertThat(context.getSplitsAssignmentSequence()).isEmpty(); // Run the partition discover callable and check the partition assignment. 
runPeriodicPartitionDiscovery(context); @@ -495,11 +488,11 @@ private void verifyAssignments( (reader, splits) -> { Set expectedAssignmentsForReader = expectedAssignments.get(reader); - assertNotNull(expectedAssignmentsForReader); - assertEquals(expectedAssignmentsForReader.size(), splits.size()); + assertThat(expectedAssignmentsForReader).isNotNull(); + assertThat(splits.size()).isEqualTo(expectedAssignmentsForReader.size()); for (KafkaPartitionSplit split : splits) { - assertTrue( - expectedAssignmentsForReader.contains(split.getTopicPartition())); + assertThat(expectedAssignmentsForReader) + .contains(split.getTopicPartition()); } }); } @@ -537,7 +530,7 @@ private void verifySplitAssignmentWithPartitions( expectedAssignment.forEach( (reader, topicPartitions) -> allTopicPartitionsFromAssignment.addAll(topicPartitions)); - assertEquals(allTopicPartitionsFromAssignment, actualTopicPartitions); + assertThat(actualTopicPartitions).isEqualTo(allTopicPartitionsFromAssignment); } private Set asEnumState(Map> assignments) { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java index d55f833e022b6..058d2a095a9de 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/KafkaSourceEnumStateSerializerTest.java @@ -32,7 +32,7 @@ import java.util.Map; import java.util.Set; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link KafkaSourceEnumStateSerializer}. 
*/ public class KafkaSourceEnumStateSerializerTest { @@ -52,7 +52,7 @@ public void testEnumStateSerde() throws IOException { final KafkaSourceEnumState restoredState = serializer.deserialize(serializer.getVersion(), bytes); - assertEquals(state.assignedPartitions(), restoredState.assignedPartitions()); + assertThat(restoredState.assignedPartitions()).isEqualTo(state.assignedPartitions()); } @Test @@ -72,7 +72,7 @@ public void testBackwardCompatibility() throws IOException { final KafkaSourceEnumState kafkaSourceEnumState = new KafkaSourceEnumStateSerializer().deserialize(0, bytes); - assertEquals(topicPartitions, kafkaSourceEnumState.assignedPartitions()); + assertThat(kafkaSourceEnumState.assignedPartitions()).isEqualTo(topicPartitions); } private Set constructTopicPartitions() { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java index 4b74bbeb45aab..1cd2aa8e3139c 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/initializer/OffsetsInitializerTest.java @@ -26,7 +26,6 @@ import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.common.TopicPartition; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -35,8 +34,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Unit tests for {@link OffsetsInitializer}. 
*/ public class OffsetsInitializerTest { @@ -65,12 +63,13 @@ public void testEarliestOffsetsInitializer() { OffsetsInitializer initializer = OffsetsInitializer.earliest(); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map offsets = initializer.getPartitionOffsets(partitions, retriever); - assertEquals(partitions.size(), offsets.size()); - assertTrue(offsets.keySet().containsAll(partitions)); + assertThat(offsets).hasSize(partitions.size()); + assertThat(offsets.keySet().containsAll(partitions)).isTrue(); for (long offset : offsets.values()) { - Assert.assertEquals(KafkaPartitionSplit.EARLIEST_OFFSET, offset); + assertThat(offset).isEqualTo(KafkaPartitionSplit.EARLIEST_OFFSET); } - assertEquals(OffsetResetStrategy.EARLIEST, initializer.getAutoOffsetResetStrategy()); + assertThat(initializer.getAutoOffsetResetStrategy()) + .isEqualTo(OffsetResetStrategy.EARLIEST); } @Test @@ -78,12 +77,12 @@ public void testLatestOffsetsInitializer() { OffsetsInitializer initializer = OffsetsInitializer.latest(); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map offsets = initializer.getPartitionOffsets(partitions, retriever); - assertEquals(partitions.size(), offsets.size()); - assertTrue(offsets.keySet().containsAll(partitions)); + assertThat(offsets).hasSize(partitions.size()); + assertThat(offsets.keySet().containsAll(partitions)).isTrue(); for (long offset : offsets.values()) { - assertEquals(KafkaPartitionSplit.LATEST_OFFSET, offset); + assertThat(offset).isEqualTo(KafkaPartitionSplit.LATEST_OFFSET); } - assertEquals(OffsetResetStrategy.LATEST, initializer.getAutoOffsetResetStrategy()); + assertThat(initializer.getAutoOffsetResetStrategy()).isEqualTo(OffsetResetStrategy.LATEST); } @Test @@ -91,10 +90,11 @@ public void testCommittedGroupOffsetsInitializer() { OffsetsInitializer initializer = OffsetsInitializer.committedOffsets(); List partitions = KafkaSourceTestEnv.getPartitionsForTopic(TOPIC); Map offsets = initializer.getPartitionOffsets(partitions, retriever); - assertEquals(partitions.size(), offsets.size()); + assertThat(offsets).hasSize(partitions.size()); offsets.forEach( - (tp, offset) -> assertEquals(KafkaPartitionSplit.COMMITTED_OFFSET, (long) offset)); - assertEquals(OffsetResetStrategy.NONE, initializer.getAutoOffsetResetStrategy()); + (tp, offset) -> + assertThat((long) offset).isEqualTo(KafkaPartitionSplit.COMMITTED_OFFSET)); + assertThat(initializer.getAutoOffsetResetStrategy()).isEqualTo(OffsetResetStrategy.NONE); } @Test @@ -105,9 +105,9 @@ public void testTimestampOffsetsInitializer() { offsets.forEach( (tp, offset) -> { long expectedOffset = tp.partition() > 2 ? tp.partition() : 3L; - assertEquals(expectedOffset, (long) offset); + assertThat((long) offset).isEqualTo(expectedOffset); }); - assertEquals(OffsetResetStrategy.NONE, initializer.getAutoOffsetResetStrategy()); + assertThat(initializer.getAutoOffsetResetStrategy()).isEqualTo(OffsetResetStrategy.NONE); } @Test @@ -122,7 +122,8 @@ public void testSpecificOffsetsInitializer() { specifiedOffsets.remove(partitionSetToCommitted); OffsetsInitializer initializer = OffsetsInitializer.offsets(specifiedOffsets); - assertEquals(OffsetResetStrategy.EARLIEST, initializer.getAutoOffsetResetStrategy()); + assertThat(initializer.getAutoOffsetResetStrategy()) + .isEqualTo(OffsetResetStrategy.EARLIEST); // The partition without committed offset should fallback to offset reset strategy. 
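One conversion in this initializer test keeps a boolean-wrapping form, assertThat(offsets.keySet().containsAll(partitions)).isTrue(), which passes and fails exactly like the old assertTrue but reports only "expected true". A sketch of the more idiomatic collection assertion (not what the patch itself applies), which would list the missing partitions on failure:

    // asserting on the collection directly makes AssertJ report the missing elements
    assertThat(offsets.keySet()).containsAll(partitions);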
TopicPartition partitionSetToEarliest = new TopicPartition(TOPIC2, 0); partitions.add(partitionSetToEarliest); @@ -138,8 +139,9 @@ public void testSpecificOffsetsInitializer() { } else { expectedOffset = specifiedOffsets.get(tp); } - assertEquals( - String.format("%s has incorrect offset.", tp), expectedOffset, (long) offset); + assertThat((long) offset) + .as(String.format("%s has incorrect offset.", tp)) + .isEqualTo(expectedOffset); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java index ea69a40fb87c0..258c1c0ab1ba3 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/enumerator/subscriber/KafkaSubscriberTest.java @@ -19,7 +19,6 @@ package org.apache.flink.connector.kafka.source.enumerator.subscriber; import org.apache.flink.connector.kafka.testutils.KafkaSourceTestEnv; -import org.apache.flink.util.ExceptionUtils; import org.apache.kafka.clients.admin.AdminClient; import org.apache.kafka.common.TopicPartition; @@ -35,9 +34,9 @@ import java.util.Set; import java.util.regex.Pattern; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Unit tests for {@link KafkaSubscriber}. 
*/ public class KafkaSubscriberTest { @@ -71,7 +70,7 @@ public void testTopicListSubscriber() { final Set expectedSubscribedPartitions = new HashSet<>(KafkaSourceTestEnv.getPartitionsForTopics(topics)); - assertEquals(expectedSubscribedPartitions, subscribedPartitions); + assertThat(subscribedPartitions).isEqualTo(expectedSubscribedPartitions); } @Test @@ -80,15 +79,9 @@ public void testNonExistingTopic() { KafkaSubscriber.getTopicListSubscriber( Collections.singletonList(NON_EXISTING_TOPIC.topic())); - Throwable t = - assertThrows( - RuntimeException.class, - () -> subscriber.getSubscribedTopicPartitions(adminClient)); - - assertTrue( - "Exception should be caused by UnknownTopicOrPartitionException", - ExceptionUtils.findThrowable(t, UnknownTopicOrPartitionException.class) - .isPresent()); + assertThatThrownBy(() -> subscriber.getSubscribedTopicPartitions(adminClient)) + .isInstanceOf(RuntimeException.class) + .satisfies(anyCauseMatches(UnknownTopicOrPartitionException.class)); } @Test @@ -102,7 +95,7 @@ public void testTopicPatternSubscriber() { new HashSet<>( KafkaSourceTestEnv.getPartitionsForTopics(Collections.singleton(TOPIC2))); - assertEquals(expectedSubscribedPartitions, subscribedPartitions); + assertThat(subscribedPartitions).isEqualTo(expectedSubscribedPartitions); } @Test @@ -117,7 +110,7 @@ public void testPartitionSetSubscriber() { final Set subscribedPartitions = subscriber.getSubscribedTopicPartitions(adminClient); - assertEquals(partitions, subscribedPartitions); + assertThat(subscribedPartitions).isEqualTo(partitions); } @Test @@ -127,14 +120,11 @@ public void testNonExistingPartition() { KafkaSubscriber.getPartitionSetSubscriber( Collections.singleton(nonExistingPartition)); - Throwable t = - assertThrows( - RuntimeException.class, - () -> subscriber.getSubscribedTopicPartitions(adminClient)); - - assertEquals( - String.format( - "Partition '%s' does not exist on Kafka brokers", nonExistingPartition), - t.getMessage()); + assertThatThrownBy(() -> subscriber.getSubscribedTopicPartitions(adminClient)) + .isInstanceOf(RuntimeException.class) + .hasMessage( + String.format( + "Partition '%s' does not exist on Kafka brokers", + nonExistingPartition)); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java index 0a1faa5f3be4d..d9dd1d3b099fe 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/metrics/KafkaSourceReaderMetricsTest.java @@ -30,9 +30,8 @@ import static org.apache.flink.connector.kafka.source.metrics.KafkaSourceReaderMetrics.PARTITION_GROUP; import static org.apache.flink.connector.kafka.source.metrics.KafkaSourceReaderMetrics.TOPIC_GROUP; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Unit test for {@link KafkaSourceReaderMetrics}. 
*/ public class KafkaSourceReaderMetricsTest { @@ -93,12 +92,12 @@ public void testCommitOffsetTracking() { metricListener.getCounter( KafkaSourceReaderMetrics.KAFKA_SOURCE_READER_METRIC_GROUP, KafkaSourceReaderMetrics.COMMITS_SUCCEEDED_METRIC_COUNTER); - assertTrue(commitsSucceededCounter.isPresent()); - assertEquals(0L, commitsSucceededCounter.get().getCount()); + assertThat(commitsSucceededCounter).isPresent(); + assertThat(commitsSucceededCounter.get().getCount()).isEqualTo(0L); kafkaSourceReaderMetrics.recordSucceededCommit(); - assertEquals(1L, commitsSucceededCounter.get().getCount()); + assertThat(commitsSucceededCounter.get().getCount()).isEqualTo(1L); } @Test @@ -107,12 +106,10 @@ public void testNonTrackingTopicPartition() { final KafkaSourceReaderMetrics kafkaSourceReaderMetrics = new KafkaSourceReaderMetrics( InternalSourceReaderMetricGroup.mock(metricListener.getMetricGroup())); - assertThrows( - IllegalArgumentException.class, - () -> kafkaSourceReaderMetrics.recordCurrentOffset(FOO_0, 15213L)); - assertThrows( - IllegalArgumentException.class, - () -> kafkaSourceReaderMetrics.recordCommittedOffset(FOO_0, 15213L)); + assertThatThrownBy(() -> kafkaSourceReaderMetrics.recordCurrentOffset(FOO_0, 15213L)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> kafkaSourceReaderMetrics.recordCommittedOffset(FOO_0, 15213L)) + .isInstanceOf(IllegalArgumentException.class); } @Test @@ -126,8 +123,8 @@ public void testFailedCommit() { metricListener.getCounter( KafkaSourceReaderMetrics.KAFKA_SOURCE_READER_METRIC_GROUP, KafkaSourceReaderMetrics.COMMITS_FAILED_METRIC_COUNTER); - assertTrue(commitsFailedCounter.isPresent()); - assertEquals(1L, commitsFailedCounter.get().getCount()); + assertThat(commitsFailedCounter).isPresent(); + assertThat(commitsFailedCounter.get().getCount()).isEqualTo(1L); } // ----------- Assertions -------------- @@ -142,8 +139,8 @@ private void assertCurrentOffset( PARTITION_GROUP, String.valueOf(tp.partition()), KafkaSourceReaderMetrics.CURRENT_OFFSET_METRIC_GAUGE); - assertTrue(currentOffsetGauge.isPresent()); - assertEquals(expectedOffset, (long) currentOffsetGauge.get().getValue()); + assertThat(currentOffsetGauge).isPresent(); + assertThat((long) currentOffsetGauge.get().getValue()).isEqualTo(expectedOffset); } private void assertCommittedOffset( @@ -156,7 +153,7 @@ private void assertCommittedOffset( PARTITION_GROUP, String.valueOf(tp.partition()), KafkaSourceReaderMetrics.COMMITTED_OFFSET_METRIC_GAUGE); - assertTrue(committedOffsetGauge.isPresent()); - assertEquals(expectedOffset, (long) committedOffsetGauge.get().getValue()); + assertThat(committedOffsetGauge).isPresent(); + assertThat((long) committedOffsetGauge.get().getValue()).isEqualTo(expectedOffset); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java index a34ed517e6332..6a9654adf5258 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaPartitionSplitReaderTest.java @@ -42,11 +42,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.serialization.ByteArrayDeserializer; import 
org.apache.kafka.common.serialization.IntegerDeserializer; -import org.hamcrest.CoreMatchers; -import org.hamcrest.MatcherAssert; -import org.hamcrest.Matchers; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -69,12 +65,8 @@ import java.util.concurrent.atomic.AtomicReference; import static org.apache.flink.connector.kafka.testutils.KafkaSourceTestEnv.NUM_RECORDS_PER_PARTITION; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Unit tests for {@link KafkaPartitionSplitReader}. */ public class KafkaPartitionSplitReaderTest { @@ -137,7 +129,7 @@ public void testWakeUp() throws Exception { reader.wakeUp(); Thread.sleep(10); } - assertNull(error.get()); + assertThat(error.get()).isNull(); } @Test @@ -159,7 +151,7 @@ public void testNumBytesInCounter() throws Exception { final long latestNumBytesIn = numBytesInCounter.getCount(); // Since it's hard to know the exact number of bytes consumed, we just check if it is // greater than 0 - assertThat(latestNumBytesIn, Matchers.greaterThan(0L)); + assertThat(latestNumBytesIn).isGreaterThan(0L); // Add another split reader.handleSplitsChanges( new SplitsAddition<>( @@ -167,7 +159,7 @@ public void testNumBytesInCounter() throws Exception { new KafkaPartitionSplit(new TopicPartition(TOPIC2, 0), 0L)))); reader.fetch(); // We just check if numBytesIn is increasing - assertThat(numBytesInCounter.getCount(), Matchers.greaterThan(latestNumBytesIn)); + assertThat(numBytesInCounter.getCount()).isGreaterThan(latestNumBytesIn); } @ParameterizedTest @@ -195,18 +187,19 @@ public void testPendingRecordsGauge(String topicSuffix) throws Throwable { Collections.singletonList( new KafkaPartitionSplit(new TopicPartition(topic1Name, 0), 0L)))); // pendingRecords should have not been registered because of lazily registration - assertFalse(metricListener.getGauge(MetricNames.PENDING_RECORDS).isPresent()); + assertThat(metricListener.getGauge(MetricNames.PENDING_RECORDS)).isNotPresent(); // Trigger first fetch reader.fetch(); final Optional> pendingRecords = metricListener.getGauge(MetricNames.PENDING_RECORDS); - assertTrue(pendingRecords.isPresent()); + assertThat(pendingRecords).isPresent(); // Validate pendingRecords - assertNotNull(pendingRecords); - assertEquals(NUM_RECORDS_PER_PARTITION - 1, (long) pendingRecords.get().getValue()); + assertThat(pendingRecords).isNotNull(); + assertThat((long) pendingRecords.get().getValue()).isEqualTo(NUM_RECORDS_PER_PARTITION - 1); for (int i = 1; i < NUM_RECORDS_PER_PARTITION; i++) { reader.fetch(); - assertEquals(NUM_RECORDS_PER_PARTITION - i - 1, (long) pendingRecords.get().getValue()); + assertThat((long) pendingRecords.get().getValue()) + .isEqualTo(NUM_RECORDS_PER_PARTITION - i - 1); } // Add another split reader.handleSplitsChanges( @@ -216,7 +209,8 @@ public void testPendingRecordsGauge(String topicSuffix) throws Throwable { // Validate pendingRecords for (int i = 0; i < NUM_RECORDS_PER_PARTITION; i++) { reader.fetch(); - 
assertEquals(NUM_RECORDS_PER_PARTITION - i - 1, (long) pendingRecords.get().getValue()); + assertThat((long) pendingRecords.get().getValue()) + .isEqualTo(NUM_RECORDS_PER_PARTITION - i - 1); } } @@ -237,7 +231,7 @@ public void testAssignEmptySplit() throws Exception { // Fetch and check empty splits is added to finished splits RecordsWithSplitIds> recordsWithSplitIds = reader.fetch(); - assertTrue(recordsWithSplitIds.finishedSplits().contains(emptySplit.splitId())); + assertThat(recordsWithSplitIds.finishedSplits()).contains(emptySplit.splitId()); // Assign another valid split to avoid consumer.poll() blocking final KafkaPartitionSplit anotherNormalSplit = @@ -250,7 +244,7 @@ public void testAssignEmptySplit() throws Exception { // Fetch again and check empty split set is cleared recordsWithSplitIds = reader.fetch(); - assertTrue(recordsWithSplitIds.finishedSplits().isEmpty()); + assertThat(recordsWithSplitIds.finishedSplits()).isEmpty(); } @Test @@ -264,9 +258,7 @@ public void testUsingCommittedOffsetsWithNoneOffsetResetStrategy() { // committed offset, and the offset reset strategy is none (Throw exception to the consumer // if no previous offset is found for the consumer's group); // So it is expected to throw an exception that missing the committed offset. - final KafkaException undefinedOffsetException = - Assertions.assertThrows( - KafkaException.class, + assertThatThrownBy( () -> reader.handleSplitsChanges( new SplitsAddition<>( @@ -274,10 +266,9 @@ public void testUsingCommittedOffsetsWithNoneOffsetResetStrategy() { new KafkaPartitionSplit( new TopicPartition(TOPIC1, 0), KafkaPartitionSplit - .COMMITTED_OFFSET))))); - MatcherAssert.assertThat( - undefinedOffsetException.getMessage(), - CoreMatchers.containsString("Undefined offset with no reset policy for partition")); + .COMMITTED_OFFSET))))) + .isInstanceOf(KafkaException.class) + .hasMessageContaining("Undefined offset with no reset policy for partition"); } @ParameterizedTest @@ -298,7 +289,7 @@ public void testUsingCommittedOffsetsWithEarliestOrLatestOffsetResetStrategy( partition, KafkaPartitionSplit.COMMITTED_OFFSET)))); // Verify that the current offset of the consumer is the expected offset - assertEquals(expectedOffset, reader.consumer().position(partition)); + assertThat(reader.consumer().position(partition)).isEqualTo(expectedOffset); } // ------------------ @@ -347,12 +338,12 @@ private void assignSplitsAndFetchUntilFinish(KafkaPartitionSplitReader reader, i TopicPartition tp = splits.get(splitId).getTopicPartition(); long earliestOffset = earliestOffsets.get(tp); long expectedRecordCount = NUM_RECORDS_PER_PARTITION - earliestOffset; - assertEquals( - expectedRecordCount, - (long) recordCount, - String.format( - "%s should have %d records.", - splits.get(splitId), expectedRecordCount)); + assertThat((long) recordCount) + .as( + String.format( + "%s should have %d records.", + splits.get(splitId), expectedRecordCount)) + .isEqualTo(expectedRecordCount); }); } @@ -397,9 +388,10 @@ private boolean verifyConsumed( int expectedValue = (int) expectedOffset; long expectedTimestamp = expectedOffset * 1000L; - assertEquals(expectedValue, deserializer.deserialize(record.topic(), record.value())); - assertEquals(expectedOffset, record.offset()); - assertEquals(expectedTimestamp, record.timestamp()); + assertThat(deserializer.deserialize(record.topic(), record.value())) + .isEqualTo(expectedValue); + assertThat(record.offset()).isEqualTo(expectedOffset); + assertThat(record.timestamp()).isEqualTo(expectedTimestamp); 
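The committed-offset test a few hunks above also shows the exception idiom used throughout this migration: Assertions.assertThrows plus a separate Hamcrest containsString check collapses into one assertThatThrownBy chain, which raises an AssertionError on its own if the lambda completes without throwing. Roughly, assuming the same reader and a splits list built as in the test:

    assertThatThrownBy(() -> reader.handleSplitsChanges(new SplitsAddition<>(splits)))
            .isInstanceOf(KafkaException.class)
            .hasMessageContaining("Undefined offset with no reset policy for partition");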
expectedOffset++; } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java index fb494aeba8fb1..9e6c1fc4faa17 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java @@ -50,7 +50,6 @@ import org.apache.kafka.common.serialization.IntegerDeserializer; import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.StringSerializer; -import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -81,6 +80,7 @@ import static org.apache.flink.connector.kafka.testutils.KafkaSourceTestEnv.NUM_PARTITIONS; import static org.apache.flink.core.testutils.CommonTestUtils.waitUtil; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; /** Unit tests for {@link KafkaSourceReader}. */ public class KafkaSourceReaderTest extends SourceReaderTestBase { @@ -349,9 +349,7 @@ void testKafkaSourceMetrics() throws Exception { // Metric "commit-total" of KafkaConsumer should be greater than 0 // It's hard to know the exactly number of commit because of the retry - MatcherAssert.assertThat( - getKafkaConsumerMetric("commit-total", metricListener), - Matchers.greaterThan(0L)); + assertThat(getKafkaConsumerMetric("commit-total", metricListener)).isGreaterThan(0L); // Committed offset should be NUM_RECORD_PER_SPLIT assertThat(getCommittedOffsetMetric(tp0, metricListener)) @@ -364,7 +362,7 @@ void testKafkaSourceMetrics() throws Exception { metricListener.getCounter( KAFKA_SOURCE_READER_METRIC_GROUP, COMMITS_SUCCEEDED_METRIC_COUNTER); assertThat(commitsSucceeded).isPresent(); - MatcherAssert.assertThat(commitsSucceeded.get().getCount(), Matchers.greaterThan(0L)); + assertThat(commitsSucceeded.get().getCount()).isGreaterThan(0L); } } @@ -395,11 +393,14 @@ void testAssigningEmptySplits() throws Exception { new TestingReaderOutput<>(), () -> reader.getNumAliveFetchers() == 0, "The split fetcher did not exit before timeout."); - MatcherAssert.assertThat( - finishedSplits, - Matchers.containsInAnyOrder( - KafkaPartitionSplit.toSplitId(normalSplit.getTopicPartition()), - KafkaPartitionSplit.toSplitId(emptySplit.getTopicPartition()))); + assertThat(finishedSplits) + .satisfies( + matching( + Matchers.containsInAnyOrder( + KafkaPartitionSplit.toSplitId( + normalSplit.getTopicPartition()), + KafkaPartitionSplit.toSplitId( + emptySplit.getTopicPartition())))); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java index e4c9191e2caec..b0b898fb5ad1a 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java +++ 
b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/deserializer/KafkaRecordDeserializationSchemaTest.java @@ -34,7 +34,6 @@ import org.apache.kafka.common.serialization.StringSerializer; import org.junit.Before; import org.junit.Test; -import org.junit.jupiter.api.Assertions; import java.io.IOException; import java.util.ArrayList; @@ -42,10 +41,7 @@ import java.util.List; import java.util.Map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Unit tests for KafkaRecordDeserializationSchema. */ public class KafkaRecordDeserializationSchemaTest { @@ -69,14 +65,14 @@ public void testKafkaDeserializationSchemaWrapper() throws IOException { SimpleCollector collector = new SimpleCollector<>(); schema.deserialize(consumerRecord, collector); - assertEquals(1, collector.list.size()); + assertThat(collector.list).hasSize(1); ObjectNode deserializedValue = collector.list.get(0); - assertEquals(4, deserializedValue.get("key").get("index").asInt()); - assertEquals("world", deserializedValue.get("value").get("word").asText()); - assertEquals("topic#1", deserializedValue.get("metadata").get("topic").asText()); - assertEquals(4, deserializedValue.get("metadata").get("offset").asInt()); - assertEquals(3, deserializedValue.get("metadata").get("partition").asInt()); + assertThat(deserializedValue.get("key").get("index").asInt()).isEqualTo(4); + assertThat(deserializedValue.get("value").get("word").asText()).isEqualTo("world"); + assertThat(deserializedValue.get("metadata").get("topic").asText()).isEqualTo("topic#1"); + assertThat(deserializedValue.get("metadata").get("offset").asInt()).isEqualTo(4); + assertThat(deserializedValue.get("metadata").get("partition").asInt()).isEqualTo(3); } @Test @@ -87,12 +83,12 @@ public void testKafkaValueDeserializationSchemaWrapper() throws IOException { SimpleCollector collector = new SimpleCollector<>(); schema.deserialize(consumerRecord, collector); - assertEquals(1, collector.list.size()); + assertThat(collector.list).hasSize(1); ObjectNode deserializedValue = collector.list.get(0); - assertEquals("world", deserializedValue.get("word").asText()); - assertNull(deserializedValue.get("key")); - assertNull(deserializedValue.get("metadata")); + assertThat(deserializedValue.get("word").asText()).isEqualTo("world"); + assertThat(deserializedValue.get("key")).isNull(); + assertThat(deserializedValue.get("metadata")).isNull(); } @Test @@ -108,8 +104,8 @@ public void testKafkaValueDeserializerWrapper() throws Exception { SimpleCollector collector = new SimpleCollector<>(); schema.deserialize(consumerRecord, collector); - assertEquals(1, collector.list.size()); - assertEquals("world", collector.list.get(0)); + assertThat(collector.list).hasSize(1); + assertThat(collector.list.get(0)).isEqualTo("world"); } @Test @@ -118,9 +114,9 @@ public void testKafkaValueDeserializerWrapperWithoutConfigurable() throws Except KafkaRecordDeserializationSchema schema = KafkaRecordDeserializationSchema.valueOnly(SimpleStringSerializer.class, config); schema.open(new TestingDeserializationContext()); - Assertions.assertEquals(configuration, config); - assertFalse(isKeyDeserializer); - assertTrue(configurableConfiguration.isEmpty()); + assertThat(config).isEqualTo(configuration); + assertThat(isKeyDeserializer).isFalse(); + 
assertThat(configurableConfiguration).isEmpty(); } @Test @@ -130,9 +126,9 @@ public void testKafkaValueDeserializerWrapperWithConfigurable() throws Exception KafkaRecordDeserializationSchema.valueOnly( ConfigurableStringSerializer.class, config); schema.open(new TestingDeserializationContext()); - Assertions.assertEquals(configurableConfiguration, config); - assertFalse(isKeyDeserializer); - assertTrue(configuration.isEmpty()); + assertThat(config).isEqualTo(configurableConfiguration); + assertThat(isKeyDeserializer).isFalse(); + assertThat(configuration).isEmpty(); } private ConsumerRecord getConsumerRecord() throws JsonProcessingException { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/testutils/KafkaSourceTestEnv.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/testutils/KafkaSourceTestEnv.java index 853cbc8b1d18c..42691dbb31e08 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/testutils/KafkaSourceTestEnv.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/testutils/KafkaSourceTestEnv.java @@ -44,7 +44,7 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Base class for KafkaSource unit tests. */ public class KafkaSourceTestEnv extends KafkaTestBase { @@ -240,7 +240,7 @@ public static void setupCommittedOffsets(String topic) new ArrayList<>(committedOffsets.keySet()))) .partitionsToOffsetAndMetadata() .get(); - assertEquals("The offsets are not committed", committedOffsets, toVerify); + assertThat(toVerify).as("The offsets are not committed").isEqualTo(committedOffsets); } public static void produceToKafka(Collection> records) diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java index 5cd7a86c47aa6..8db2e596c339d 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkFixedPartitionerTest.java @@ -20,9 +20,10 @@ import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; -import org.junit.Assert; import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** Tests for the {@link FlinkFixedPartitioner}. 
*/ public class FlinkFixedPartitionerTest { @@ -44,19 +45,18 @@ public void testMoreFlinkThanBrokers() { int[] partitions = new int[] {0}; part.open(0, 4); - Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions)); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(0); part.open(1, 4); - Assert.assertEquals(0, part.partition("abc2", null, null, null, partitions)); + assertThat(part.partition("abc2", null, null, null, partitions)).isEqualTo(0); part.open(2, 4); - Assert.assertEquals(0, part.partition("abc3", null, null, null, partitions)); - Assert.assertEquals( - 0, - part.partition("abc3", null, null, null, partitions)); // check if it is changing ;) + assertThat(part.partition("abc3", null, null, null, partitions)).isEqualTo(0); + assertThat(part.partition("abc3", null, null, null, partitions)) + .isEqualTo(0); // check if it is changing ;) part.open(3, 4); - Assert.assertEquals(0, part.partition("abc4", null, null, null, partitions)); + assertThat(part.partition("abc4", null, null, null, partitions)).isEqualTo(0); } /** @@ -78,12 +78,12 @@ public void testFewerPartitions() { int[] partitions = new int[] {0, 1, 2, 3, 4}; part.open(0, 2); - Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions)); - Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions)); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(0); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(0); part.open(1, 2); - Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions)); - Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions)); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(1); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(1); } /* @@ -98,12 +98,12 @@ public void testMixedCase() { int[] partitions = new int[] {0, 1}; part.open(0, 3); - Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions)); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(0); part.open(1, 3); - Assert.assertEquals(1, part.partition("abc1", null, null, null, partitions)); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(1); part.open(2, 3); - Assert.assertEquals(0, part.partition("abc1", null, null, null, partitions)); + assertThat(part.partition("abc1", null, null, null, partitions)).isEqualTo(0); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java index 645278c959737..b1947a5653497 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseMigrationTest.java @@ -53,8 +53,7 @@ import java.util.Map; import java.util.stream.Collectors; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.powermock.api.mockito.PowerMockito.doAnswer; @@ -222,11 +221,11 @@ public void 
testRestoreFromEmptyStateNoPartitions() throws Exception { testHarness.open(); // assert that no partitions were found and is empty - assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null); - assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty()); + assertThat(consumerFunction.getSubscribedPartitionsToStartOffsets()).isNotNull(); + assertThat(consumerFunction.getSubscribedPartitionsToStartOffsets()).isEmpty(); // assert that no state was restored - assertTrue(consumerFunction.getRestoredState().isEmpty()); + assertThat(consumerFunction.getRestoredState()).isEmpty(); consumerOperator.close(); consumerOperator.cancel(); @@ -275,20 +274,18 @@ public void testRestoreFromEmptyStateWithPartitions() throws Exception { } // assert that there are partitions and is identical to expected list - assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null); - assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty()); - assertEquals( - expectedSubscribedPartitionsWithStartOffsets, - consumerFunction.getSubscribedPartitionsToStartOffsets()); + assertThat(consumerFunction.getSubscribedPartitionsToStartOffsets()).isNotNull(); + assertThat(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty()).isTrue(); + assertThat(consumerFunction.getSubscribedPartitionsToStartOffsets()) + .isEqualTo(expectedSubscribedPartitionsWithStartOffsets); // the new partitions should have been considered as restored state - assertTrue(consumerFunction.getRestoredState() != null); - assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty()); + assertThat(consumerFunction.getRestoredState()).isNotNull(); + assertThat(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty()).isTrue(); for (Map.Entry expectedEntry : expectedSubscribedPartitionsWithStartOffsets.entrySet()) { - assertEquals( - expectedEntry.getValue(), - consumerFunction.getRestoredState().get(expectedEntry.getKey())); + assertThat(consumerFunction.getRestoredState().get(expectedEntry.getKey())) + .isEqualTo(expectedEntry.getValue()); } consumerOperator.close(); @@ -325,15 +322,16 @@ public void testRestore() throws Exception { testHarness.open(); // assert that there are partitions and is identical to expected list - assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null); - assertTrue(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty()); + assertThat(consumerFunction.getSubscribedPartitionsToStartOffsets()).isNotNull(); + assertThat(!consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty()).isTrue(); // on restore, subscribedPartitionsToStartOffsets should be identical to the restored state - assertEquals(PARTITION_STATE, consumerFunction.getSubscribedPartitionsToStartOffsets()); + assertThat(consumerFunction.getSubscribedPartitionsToStartOffsets()) + .isEqualTo(PARTITION_STATE); // assert that state is correctly restored from legacy checkpoint - assertTrue(consumerFunction.getRestoredState() != null); - assertEquals(PARTITION_STATE, consumerFunction.getRestoredState()); + assertThat(consumerFunction.getRestoredState()).isNotNull(); + assertThat(consumerFunction.getRestoredState()).isEqualTo(PARTITION_STATE); consumerOperator.close(); consumerOperator.cancel(); diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java 
b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java index 5e1000928b393..a2438165e967f 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaConsumerBaseTest.java @@ -69,7 +69,6 @@ import org.apache.flink.util.function.SupplierWithException; import org.apache.flink.util.function.ThrowingRunnable; -import org.junit.Assert; import org.junit.Test; import javax.annotation.Nonnull; @@ -90,18 +89,15 @@ import java.util.stream.Collectors; import static org.apache.flink.util.Preconditions.checkState; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; import static org.hamcrest.collection.IsIn.isIn; import static org.hamcrest.collection.IsMapContaining.hasKey; import static org.hamcrest.core.IsNot.not; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -114,19 +110,19 @@ public class FlinkKafkaConsumerBaseTest extends TestLogger { @Test @SuppressWarnings("unchecked") public void testEitherWatermarkExtractor() { - try { - new DummyFlinkKafkaConsumer() - .assignTimestampsAndWatermarks((AssignerWithPeriodicWatermarks) null); - fail(); - } catch (NullPointerException ignored) { - } - - try { - new DummyFlinkKafkaConsumer() - .assignTimestampsAndWatermarks((AssignerWithPunctuatedWatermarks) null); - fail(); - } catch (NullPointerException ignored) { - } + assertThatThrownBy( + () -> + new DummyFlinkKafkaConsumer() + .assignTimestampsAndWatermarks( + (AssignerWithPeriodicWatermarks) null)) + .isInstanceOf(NullPointerException.class); + + assertThatThrownBy( + () -> + new DummyFlinkKafkaConsumer() + .assignTimestampsAndWatermarks( + (AssignerWithPunctuatedWatermarks) null)) + .isInstanceOf(NullPointerException.class); final AssignerWithPeriodicWatermarks periodicAssigner = mock(AssignerWithPeriodicWatermarks.class); @@ -135,19 +131,13 @@ public void testEitherWatermarkExtractor() { DummyFlinkKafkaConsumer c1 = new DummyFlinkKafkaConsumer<>(); c1.assignTimestampsAndWatermarks(periodicAssigner); - try { - c1.assignTimestampsAndWatermarks(punctuatedAssigner); - fail(); - } catch (IllegalStateException ignored) { - } + assertThatThrownBy(() -> c1.assignTimestampsAndWatermarks(punctuatedAssigner)) + .isInstanceOf(IllegalStateException.class); DummyFlinkKafkaConsumer c2 = new DummyFlinkKafkaConsumer<>(); c2.assignTimestampsAndWatermarks(punctuatedAssigner); - try { - c2.assignTimestampsAndWatermarks(periodicAssigner); - fail(); - } catch (IllegalStateException ignored) { - } + assertThatThrownBy(() -> c2.assignTimestampsAndWatermarks(periodicAssigner)) + .isInstanceOf(IllegalStateException.class); } /** Tests that no checkpoints happen when the fetcher is not running. 
*/ @@ -167,12 +157,12 @@ public void ignoreCheckpointWhenNotRunning() throws Exception { consumer.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1)); // no state should have been checkpointed - assertFalse(listState.get().iterator().hasNext()); + assertThat(listState.get().iterator().hasNext()).isFalse(); // acknowledgement of the checkpoint should also not result in any offset commits consumer.notifyCheckpointComplete(1L); - assertNull(fetcher.getAndClearLastCommittedOffsets()); - assertEquals(0, fetcher.getCommitCount()); + assertThat(fetcher.getAndClearLastCommittedOffsets()).isNull(); + assertThat(fetcher.getCommitCount()).isEqualTo(0); } /** @@ -194,7 +184,7 @@ public void checkRestoredCheckpointWhenFetcherNotReady() throws Exception { // ensure that the list was cleared and refilled. while this is an implementation detail, we // use it here // to figure out that snapshotState() actually did something. - Assert.assertTrue(restoredListState.isClearCalled()); + assertThat(restoredListState.isClearCalled()).isTrue(); Set expected = new HashSet<>(); @@ -205,11 +195,11 @@ public void checkRestoredCheckpointWhenFetcherNotReady() throws Exception { int counter = 0; for (Serializable serializable : restoredListState.get()) { - assertTrue(expected.contains(serializable)); + assertThat(expected).contains(serializable); counter++; } - assertEquals(expected.size(), counter); + assertThat(counter).isEqualTo(expected.size()); } @Test @@ -222,7 +212,7 @@ public void testConfigureOnCheckpointsCommitMode() throws Exception { consumer, false, null, true, // enable checkpointing; auto commit should be ignored 0, 1); - assertEquals(OffsetCommitMode.ON_CHECKPOINTS, consumer.getOffsetCommitMode()); + assertThat(consumer.getOffsetCommitMode()).isEqualTo(OffsetCommitMode.ON_CHECKPOINTS); } @Test @@ -232,7 +222,7 @@ public void testConfigureAutoCommitMode() throws Exception { setupConsumer(consumer); - assertEquals(OffsetCommitMode.KAFKA_PERIODIC, consumer.getOffsetCommitMode()); + assertThat(consumer.getOffsetCommitMode()).isEqualTo(OffsetCommitMode.KAFKA_PERIODIC); } @Test @@ -247,7 +237,7 @@ public void testConfigureDisableOffsetCommitWithCheckpointing() throws Exception consumer, false, null, true, // enable checkpointing; auto commit should be ignored 0, 1); - assertEquals(OffsetCommitMode.DISABLED, consumer.getOffsetCommitMode()); + assertThat(consumer.getOffsetCommitMode()).isEqualTo(OffsetCommitMode.DISABLED); } @Test @@ -257,7 +247,7 @@ public void testConfigureDisableOffsetCommitWithoutCheckpointing() throws Except setupConsumer(consumer); - assertEquals(OffsetCommitMode.DISABLED, consumer.getOffsetCommitMode()); + assertThat(consumer.getOffsetCommitMode()).isEqualTo(OffsetCommitMode.DISABLED); } /** @@ -375,11 +365,11 @@ private void checkFilterRestoredPartitionsWithDisovered( Map subscribedPartitionsToStartOffsets = consumer.getSubscribedPartitionsToStartOffsets(); - assertEquals( - new HashSet<>(expectedSubscribedPartitions), - subscribedPartitionsToStartOffsets.keySet().stream() - .map(partition -> partition.getTopic()) - .collect(Collectors.toSet())); + assertThat( + subscribedPartitionsToStartOffsets.keySet().stream() + .map(partition -> partition.getTopic()) + .collect(Collectors.toSet())) + .isEqualTo(new HashSet<>(expectedSubscribedPartitions)); } @Test @@ -426,7 +416,7 @@ public void go() throws Exception { runThread.start(); fetcher.waitUntilRun(); - assertEquals(0, consumer.getPendingOffsetsToCommit().size()); + assertThat(consumer.getPendingOffsetsToCommit()).isEmpty(); // 
checkpoint 1 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(138, 138)); @@ -439,9 +429,9 @@ public void go() throws Exception { snapshot1.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1); } - assertEquals(state1, snapshot1); - assertEquals(1, consumer.getPendingOffsetsToCommit().size()); - assertEquals(state1, consumer.getPendingOffsetsToCommit().get(138L)); + assertThat(snapshot1).isEqualTo(state1); + assertThat(consumer.getPendingOffsetsToCommit()).hasSize(1); + assertThat(consumer.getPendingOffsetsToCommit().get(138L)).isEqualTo(state1); // checkpoint 2 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140)); @@ -454,16 +444,16 @@ public void go() throws Exception { snapshot2.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1); } - assertEquals(state2, snapshot2); - assertEquals(2, consumer.getPendingOffsetsToCommit().size()); - assertEquals(state2, consumer.getPendingOffsetsToCommit().get(140L)); + assertThat(snapshot2).isEqualTo(state2); + assertThat(consumer.getPendingOffsetsToCommit()).hasSize(2); + assertThat(consumer.getPendingOffsetsToCommit().get(140L)).isEqualTo(state2); // ack checkpoint 1 consumer.notifyCheckpointComplete(138L); - assertEquals(1, consumer.getPendingOffsetsToCommit().size()); - assertTrue(consumer.getPendingOffsetsToCommit().containsKey(140L)); - assertEquals(state1, fetcher.getAndClearLastCommittedOffsets()); - assertEquals(1, fetcher.getCommitCount()); + assertThat(consumer.getPendingOffsetsToCommit()).hasSize(1); + assertThat(consumer.getPendingOffsetsToCommit()).containsKey(140L); + assertThat(fetcher.getAndClearLastCommittedOffsets()).isEqualTo(state1); + assertThat(fetcher.getCommitCount()).isEqualTo(1); // checkpoint 3 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141)); @@ -476,20 +466,20 @@ public void go() throws Exception { snapshot3.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1); } - assertEquals(state3, snapshot3); - assertEquals(2, consumer.getPendingOffsetsToCommit().size()); - assertEquals(state3, consumer.getPendingOffsetsToCommit().get(141L)); + assertThat(snapshot3).isEqualTo(state3); + assertThat(consumer.getPendingOffsetsToCommit()).hasSize(2); + assertThat(consumer.getPendingOffsetsToCommit().get(141L)).isEqualTo(state3); // ack checkpoint 3, subsumes number 2 consumer.notifyCheckpointComplete(141L); - assertEquals(0, consumer.getPendingOffsetsToCommit().size()); - assertEquals(state3, fetcher.getAndClearLastCommittedOffsets()); - assertEquals(2, fetcher.getCommitCount()); + assertThat(consumer.getPendingOffsetsToCommit()).isEmpty(); + assertThat(fetcher.getAndClearLastCommittedOffsets()).isEqualTo(state3); + assertThat(fetcher.getCommitCount()).isEqualTo(2); consumer.notifyCheckpointComplete(666); // invalid checkpoint - assertEquals(0, consumer.getPendingOffsetsToCommit().size()); - assertNull(fetcher.getAndClearLastCommittedOffsets()); - assertEquals(2, fetcher.getCommitCount()); + assertThat(consumer.getPendingOffsetsToCommit()).isEmpty(); + assertThat(fetcher.getAndClearLastCommittedOffsets()).isNull(); + assertThat(fetcher.getCommitCount()).isEqualTo(2); consumer.cancel(); runThread.sync(); @@ -539,7 +529,7 @@ public void go() throws Exception { runThread.start(); fetcher.waitUntilRun(); - assertEquals(0, consumer.getPendingOffsetsToCommit().size()); + assertThat(consumer.getPendingOffsetsToCommit()).isEmpty(); // checkpoint 1 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(138, 138)); @@ -552,11 
+542,9 @@ public void go() throws Exception { snapshot1.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1); } - assertEquals(state1, snapshot1); - assertEquals( - 0, - consumer.getPendingOffsetsToCommit() - .size()); // pending offsets to commit should not be updated + assertThat(snapshot1).isEqualTo(state1); + assertThat(consumer.getPendingOffsetsToCommit().size()) + .isEqualTo(0); // pending offsets to commit should not be updated // checkpoint 2 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(140, 140)); @@ -569,16 +557,15 @@ public void go() throws Exception { snapshot2.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1); } - assertEquals(state2, snapshot2); - assertEquals( - 0, - consumer.getPendingOffsetsToCommit() - .size()); // pending offsets to commit should not be updated + assertThat(snapshot2).isEqualTo(state2); + assertThat(consumer.getPendingOffsetsToCommit().size()) + .isEqualTo(0); // pending offsets to commit should not be updated // ack checkpoint 1 consumer.notifyCheckpointComplete(138L); - assertEquals(0, fetcher.getCommitCount()); - assertNull(fetcher.getAndClearLastCommittedOffsets()); // no offsets should be committed + assertThat(fetcher.getCommitCount()).isEqualTo(0); + assertThat(fetcher.getAndClearLastCommittedOffsets()) + .isNull(); // no offsets should be committed // checkpoint 3 consumer.snapshotState(new StateSnapshotContextSynchronousImpl(141, 141)); @@ -591,20 +578,20 @@ public void go() throws Exception { snapshot3.put(kafkaTopicPartitionLongTuple2.f0, kafkaTopicPartitionLongTuple2.f1); } - assertEquals(state3, snapshot3); - assertEquals( - 0, - consumer.getPendingOffsetsToCommit() - .size()); // pending offsets to commit should not be updated + assertThat(snapshot3).isEqualTo(state3); + assertThat(consumer.getPendingOffsetsToCommit().size()) + .isEqualTo(0); // pending offsets to commit should not be updated // ack checkpoint 3, subsumes number 2 consumer.notifyCheckpointComplete(141L); - assertEquals(0, fetcher.getCommitCount()); - assertNull(fetcher.getAndClearLastCommittedOffsets()); // no offsets should be committed + assertThat(fetcher.getCommitCount()).isEqualTo(0); + assertThat(fetcher.getAndClearLastCommittedOffsets()) + .isNull(); // no offsets should be committed consumer.notifyCheckpointComplete(666); // invalid checkpoint - assertEquals(0, fetcher.getCommitCount()); - assertNull(fetcher.getAndClearLastCommittedOffsets()); // no offsets should be committed + assertThat(fetcher.getCommitCount()).isEqualTo(0); + assertThat(fetcher.getAndClearLastCommittedOffsets()) + .isNull(); // no offsets should be committed consumer.cancel(); runThread.sync(); @@ -621,9 +608,9 @@ public void testClosePartitionDiscovererWhenOpenThrowException() throws Exceptio new DummyFlinkKafkaConsumer<>(failingPartitionDiscoverer); testFailingConsumerLifecycle(consumer, failureCause); - assertTrue( - "partitionDiscoverer should be closed when consumer is closed", - failingPartitionDiscoverer.isClosed()); + assertThat(failingPartitionDiscoverer.isClosed()) + .as("partitionDiscoverer should be closed when consumer is closed") + .isTrue(); } @Test @@ -640,9 +627,9 @@ public void testClosePartitionDiscovererWhenCreateKafkaFetcherFails() throws Exc 100L); testFailingConsumerLifecycle(consumer, failureCause); - assertTrue( - "partitionDiscoverer should be closed when consumer is closed", - testPartitionDiscoverer.isClosed()); + assertThat(testPartitionDiscoverer.isClosed()) + .as("partitionDiscoverer should be closed when 
consumer is closed") + .isTrue(); } @Test @@ -663,9 +650,9 @@ public void testClosePartitionDiscovererWhenKafkaFetcherFails() throws Exception new DummyFlinkKafkaConsumer<>(() -> mock, testPartitionDiscoverer, 100L); testFailingConsumerLifecycle(consumer, failureCause); - assertTrue( - "partitionDiscoverer should be closed when consumer is closed", - testPartitionDiscoverer.isClosed()); + assertThat(testPartitionDiscoverer.isClosed()) + .as("partitionDiscoverer should be closed when consumer is closed") + .isTrue(); } private void testFailingConsumerLifecycle( @@ -679,10 +666,9 @@ private void testFailingConsumerLifecycle( "Exception should have been thrown from open / run method of FlinkKafkaConsumerBase."); } catch (Exception e) { assertThat( - ExceptionUtils.findThrowable( - e, throwable -> throwable.equals(expectedException)) - .isPresent(), - is(true)); + ExceptionUtils.findThrowable( + e, throwable -> throwable.equals(expectedException))) + .isPresent(); } testKafkaConsumer.close(); } @@ -695,9 +681,9 @@ public void testClosePartitionDiscovererWithCancellation() throws Exception { new TestingFlinkKafkaConsumer<>(testPartitionDiscoverer, 100L); testNormalConsumerLifecycle(consumer); - assertTrue( - "partitionDiscoverer should be closed when consumer is closed", - testPartitionDiscoverer.isClosed()); + assertThat(testPartitionDiscoverer.isClosed()) + .as("partitionDiscoverer should be closed when consumer is closed") + .isTrue(); } private void testNormalConsumerLifecycle(FlinkKafkaConsumerBase testKafkaConsumer) @@ -740,10 +726,10 @@ public void testExplicitStateSerializerCompatibility() throws Exception { Tuple2 actualTuple = InstantiationUtil.deserializeFromByteArray(kafkaConsumerSerializer, bytes); - Assert.assertEquals( - "Explicit Serializer is not compatible with previous method of creating Serializer using TypeHint.", - tuple, - actualTuple); + assertThat(actualTuple) + .as( + "Explicit Serializer is not compatible with previous method of creating Serializer using TypeHint.") + .isEqualTo(tuple); } @Test @@ -816,15 +802,14 @@ private void testRescaling( // make sure that no one else is subscribed to these partitions for (KafkaTopicPartition partition : subscribedPartitions.keySet()) { - assertThat(globalSubscribedPartitions, not(hasKey(partition))); + assertThat(globalSubscribedPartitions).satisfies(matching(not(hasKey(partition)))); } globalSubscribedPartitions.putAll(subscribedPartitions); } - assertThat(globalSubscribedPartitions.values(), hasSize(numPartitions)); - assertThat( - mockFetchedPartitionsOnStartup, - everyItem(isIn(globalSubscribedPartitions.keySet()))); + assertThat(globalSubscribedPartitions.values()).satisfies(matching(hasSize(numPartitions))); + assertThat(mockFetchedPartitionsOnStartup) + .satisfies(matching(everyItem(isIn(globalSubscribedPartitions.keySet())))); OperatorSubtaskState[] state = new OperatorSubtaskState[initialParallelism]; @@ -885,15 +870,16 @@ private void testRescaling( // make sure that no one else is subscribed to these partitions for (KafkaTopicPartition partition : subscribedPartitions.keySet()) { - assertThat(restoredGlobalSubscribedPartitions, not(hasKey(partition))); + assertThat(restoredGlobalSubscribedPartitions) + .satisfies(matching(not(hasKey(partition)))); } restoredGlobalSubscribedPartitions.putAll(subscribedPartitions); } - assertThat(restoredGlobalSubscribedPartitions.values(), hasSize(restoredNumPartitions)); - assertThat( - mockFetchedPartitionsOnStartup, - everyItem(isIn(restoredGlobalSubscribedPartitions.keySet()))); + 
assertThat(restoredGlobalSubscribedPartitions.values()) + .satisfies(matching(hasSize(restoredNumPartitions))); + assertThat(mockFetchedPartitionsOnStartup) + .satisfies(matching(everyItem(isIn(restoredGlobalSubscribedPartitions.keySet())))); } @Test @@ -908,7 +894,7 @@ public void testOpen() throws Exception { 0); testHarness.open(); - assertThat("Open method was not called", deserializationSchema.isOpenCalled(), is(true)); + assertThat(deserializationSchema.isOpenCalled()).as("Open method was not called").isTrue(); } @Test @@ -922,10 +908,9 @@ public void testOpenWithRestoreState() throws Exception { new TestingListState<>(); setupConsumer(consumer, true, restoredListState, true, 0, 1); - assertThat( - "DeserializationSchema's open method was not invoked", - deserializationSchema.isOpenCalled(), - is(true)); + assertThat(deserializationSchema.isOpenCalled()) + .as("DeserializationSchema's open method was not invoked") + .isTrue(); } // ------------------------------------------------------------------------ diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java index 2c46670a32653..79f12abb95e84 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaInternalProducerITCase.java @@ -38,8 +38,7 @@ import java.util.Properties; import java.util.UUID; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for our own {@link FlinkKafkaInternalProducer}. 
*/ @SuppressWarnings("serial") @@ -96,7 +95,9 @@ public void testHappyPath() throws Exception { } finally { kafkaProducer.close(Duration.ofSeconds(5)); } - assertNull("The message should have been successfully sent", exceptionInCallback); + assertThat(exceptionInCallback) + .as("The message should have been successfully sent") + .isNull(); assertRecord(topicName, "42", "42"); deleteTestTopic(topicName); } @@ -112,7 +113,9 @@ public void testResumeTransaction() throws Exception { kafkaProducer.send( new ProducerRecord<>(topicName, "42", "42"), new ErrorCheckingCallback()); kafkaProducer.flush(); - assertNull("The message should have been successfully sent", exceptionInCallback); + assertThat(exceptionInCallback) + .as("The message should have been successfully sent") + .isNull(); long producerId = kafkaProducer.getProducerId(); short epoch = kafkaProducer.getEpoch(); @@ -224,7 +227,9 @@ private FlinkKafkaInternalProducer getClosedProducer(String topi kafkaProducer.send( new ProducerRecord<>(topicName, "42", "42"), new ErrorCheckingCallback()); kafkaProducer.close(Duration.ofSeconds(5)); - assertNull("The message should have been successfully sent", exceptionInCallback); + assertThat(exceptionInCallback) + .as("The message should have been successfully sent") + .isNull(); return kafkaProducer; } @@ -237,8 +242,8 @@ private void assertRecord(String topicName, String expectedKey, String expectedV } ConsumerRecord record = Iterables.getOnlyElement(records); - assertEquals(expectedKey, record.key()); - assertEquals(expectedValue, record.value()); + assertThat(record.key()).isEqualTo(expectedKey); + assertThat(record.value()).isEqualTo(expectedValue); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java index f9229ae040f7a..755c4a6c558cc 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerBaseTest.java @@ -40,7 +40,6 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.junit.Assert; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -49,7 +48,8 @@ import java.util.List; import java.util.Properties; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; @@ -82,14 +82,16 @@ public void testKeyValueDeserializersSetIfMissing() throws Exception { new DummyFlinkKafkaProducer<>( props, new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null); - assertTrue(props.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)); - assertTrue(props.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)); - assertTrue( - props.getProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) - .equals(ByteArraySerializer.class.getName())); - assertTrue( - props.getProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) - .equals(ByteArraySerializer.class.getName())); + 
assertThat(props).containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); + assertThat(props).containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); + assertThat( + props.getProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) + .equals(ByteArraySerializer.class.getName())) + .isTrue(); + assertThat( + props.getProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) + .equals(ByteArraySerializer.class.getName())) + .isTrue(); } /** Tests that partitions list is determinate and correctly provided to custom partitioner. */ @@ -165,13 +167,13 @@ public void testAsyncErrorRethrownOnInvoke() throws Throwable { testHarness.processElement(new StreamRecord<>("msg-2")); } catch (Exception e) { // the next invoke should rethrow the async exception - Assert.assertTrue(e.getCause().getMessage().contains("artificial async exception")); + assertThat(e.getCause().getMessage()).contains("artificial async exception"); // test succeeded return; } - Assert.fail(); + fail("unknown failure"); } /** @@ -202,13 +204,13 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { testHarness.snapshot(123L, 123L); } catch (Exception e) { // the next invoke should rethrow the async exception - Assert.assertTrue(e.getCause().getMessage().contains("artificial async exception")); + assertThat(e.getCause().getMessage()).contains("artificial async exception"); // test succeeded return; } - Assert.fail(); + fail("unknown failure"); } /** @@ -266,14 +268,14 @@ public void go() throws Exception { snapshotThread.sync(); } catch (Exception e) { // the snapshot should have failed with the async exception - Assert.assertTrue( - e.getCause().getMessage().contains("artificial async failure for 2nd message")); + assertThat(e.getCause().getMessage()) + .contains("artificial async failure for 2nd message"); // test succeeded return; } - Assert.fail(); + fail("unknown failure"); } /** @@ -302,7 +304,7 @@ public void testAtLeastOnceProducer() throws Throwable { testHarness.processElement(new StreamRecord<>("msg-3")); verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class)); - Assert.assertEquals(3, producer.getPendingSize()); + assertThat(producer.getPendingSize()).isEqualTo(3); // start a thread to perform checkpointing CheckedThread snapshotThread = @@ -321,22 +323,25 @@ public void go() throws Exception { // blocked; // this would block forever if the snapshot didn't perform a flush producer.waitUntilFlushStarted(); - Assert.assertTrue( - "Snapshot returned before all records were flushed", snapshotThread.isAlive()); + assertThat(snapshotThread.isAlive()) + .as("Snapshot returned before all records were flushed") + .isTrue(); // now, complete the callbacks producer.getPendingCallbacks().get(0).onCompletion(null, null); - Assert.assertTrue( - "Snapshot returned before all records were flushed", snapshotThread.isAlive()); - Assert.assertEquals(2, producer.getPendingSize()); + assertThat(snapshotThread.isAlive()) + .as("Snapshot returned before all records were flushed") + .isTrue(); + assertThat(producer.getPendingSize()).isEqualTo(2); producer.getPendingCallbacks().get(1).onCompletion(null, null); - Assert.assertTrue( - "Snapshot returned before all records were flushed", snapshotThread.isAlive()); - Assert.assertEquals(1, producer.getPendingSize()); + assertThat(snapshotThread.isAlive()) + .as("Snapshot returned before all records were flushed") + .isTrue(); + assertThat(producer.getPendingSize()).isEqualTo(1); producer.getPendingCallbacks().get(2).onCompletion(null, null); - 
Assert.assertEquals(0, producer.getPendingSize()); + assertThat(producer.getPendingSize()).isEqualTo(0); // this would fail with an exception if flushing wasn't completed before the snapshot method // returned diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java index 4fdd44e2beaed..7b345bf7d5695 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerITCase.java @@ -47,11 +47,9 @@ import static org.apache.flink.util.ExceptionUtils.findThrowable; import static org.apache.flink.util.Preconditions.checkState; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.startsWith; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assertions.fail; /** * IT cases for the {@link FlinkKafkaProducer}. @@ -112,10 +110,9 @@ public void resourceCleanUp(FlinkKafkaProducer.Semantic semantic) throws Excepti } if (initialActiveThreads.isPresent()) { - assertThat( - "active threads count", - Thread.activeCount(), - lessThan(initialActiveThreads.get() + allowedEpsilonThreadCountGrow)); + assertThat(Thread.activeCount()) + .as("active threads count") + .isLessThan(initialActiveThreads.get() + allowedEpsilonThreadCountGrow); } else { initialActiveThreads = Optional.of(Thread.activeCount()); } @@ -178,7 +175,8 @@ public void testRestoreToCheckpointAfterExceedingProducersPool() throws Exceptio public void testFlinkKafkaProducerFailBeforeNotify() throws Exception { String topic = "flink-kafka-producer-fail-before-notify"; - OneInputStreamOperatorTestHarness testHarness = createTestHarness(topic); + final OneInputStreamOperatorTestHarness testHarness = + createTestHarness(topic); testHarness.setup(); testHarness.open(); @@ -190,13 +188,13 @@ public void testFlinkKafkaProducerFailBeforeNotify() throws Exception { int leaderId = kafkaServer.getLeaderToShutDown(topic); failBroker(leaderId); - try { - testHarness.processElement(44, 4); - testHarness.snapshot(2, 5); - fail(); - } catch (Exception ex) { - // expected - } + assertThatThrownBy( + () -> { + testHarness.processElement(44, 4); + testHarness.snapshot(2, 5); + }) + .isInstanceOf(Exception.class); + try { testHarness.close(); } catch (Exception ex) { @@ -204,10 +202,11 @@ public void testFlinkKafkaProducerFailBeforeNotify() throws Exception { kafkaServer.restartBroker(leaderId); - testHarness = createTestHarness(topic); - testHarness.setup(); - testHarness.initializeState(snapshot); - testHarness.close(); + final OneInputStreamOperatorTestHarness testHarness2 = + createTestHarness(topic); + testHarness2.setup(); + testHarness2.initializeState(snapshot); + testHarness2.close(); assertExactlyOnceForTopic(createProperties(), topic, Arrays.asList(42, 43)); @@ -637,9 +636,9 @@ public void testDefaultTransactionalIdPrefix() throws Exception { deleteTestTopic(topic); checkProducerLeak(); - assertNotNull(transactionalIdUsed); + assertThat(transactionalIdUsed).isNotNull(); String 
expectedTransactionalIdPrefix = taskName + "-" + operatorID.toHexString(); - assertThat(transactionalIdUsed, startsWith(expectedTransactionalIdPrefix)); + assertThat(transactionalIdUsed).startsWith(expectedTransactionalIdPrefix); } @Test @@ -671,8 +670,8 @@ public void testCustomizeTransactionalIdPrefix() throws Exception { deleteTestTopic(topic); checkProducerLeak(); - assertNotNull(transactionalIdUsed); - assertThat(transactionalIdUsed, startsWith(transactionalIdPrefix)); + assertThat(transactionalIdUsed).isNotNull(); + assertThat(transactionalIdUsed).startsWith(transactionalIdPrefix); } @Test diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java index 2886fe31f6a06..6fedcc43ca018 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducerTest.java @@ -33,8 +33,7 @@ import java.util.Optional; import java.util.Properties; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link FlinkKafkaProducer}. */ public class FlinkKafkaProducerTest { @@ -55,7 +54,7 @@ public void testOpenSerializationSchemaProducer() throws Exception { testHarness.open(); - assertThat(schema.openCalled, equalTo(true)); + assertThat(schema.openCalled).isTrue(); } @Test @@ -81,7 +80,7 @@ public void testOpenKafkaSerializationSchemaProducer() throws Exception { testHarness.open(); - assertThat(schema.openCalled, equalTo(true)); + assertThat(schema.openCalled).isTrue(); } @Test @@ -107,7 +106,7 @@ public void testOpenKafkaCustomPartitioner() throws Exception { testHarness.open(); - assertThat(partitioner.openCalled, equalTo(true)); + assertThat(partitioner.openCalled).isTrue(); } @Test(expected = NullPointerException.class) diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java index 3c6dc197df687..0f67ba9c1799d 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/JSONKeyValueDeserializationSchemaTest.java @@ -23,9 +23,10 @@ import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.junit.Assert; import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** Tests for the{@link JSONKeyValueDeserializationSchema}. 
*/ public class JSONKeyValueDeserializationSchemaTest { @@ -44,9 +45,9 @@ public void testDeserializeWithoutMetadata() throws Exception { ObjectNode deserializedValue = schema.deserialize(newConsumerRecord(serializedKey, serializedValue)); - Assert.assertTrue(deserializedValue.get("metadata") == null); - Assert.assertEquals(4, deserializedValue.get("key").get("index").asInt()); - Assert.assertEquals("world", deserializedValue.get("value").get("word").asText()); + assertThat(deserializedValue.get("metadata")).isNull(); + assertThat(deserializedValue.get("key").get("index").asInt()).isEqualTo(4); + assertThat(deserializedValue.get("value").get("word").asText()).isEqualTo("world"); } @Test @@ -62,9 +63,9 @@ public void testDeserializeWithoutKey() throws Exception { ObjectNode deserializedValue = schema.deserialize(newConsumerRecord(serializedKey, serializedValue)); - Assert.assertTrue(deserializedValue.get("metadata") == null); - Assert.assertTrue(deserializedValue.get("key") == null); - Assert.assertEquals("world", deserializedValue.get("value").get("word").asText()); + assertThat(deserializedValue.get("metadata")).isNull(); + assertThat(deserializedValue.get("key")).isNull(); + assertThat(deserializedValue.get("value").get("word").asText()).isEqualTo("world"); } private static ConsumerRecord newConsumerRecord( @@ -95,9 +96,9 @@ public void testDeserializeWithoutValue() throws Exception { ObjectNode deserializedValue = schema.deserialize(newConsumerRecord(serializedKey, serializedValue)); - Assert.assertTrue(deserializedValue.get("metadata") == null); - Assert.assertEquals(4, deserializedValue.get("key").get("index").asInt()); - Assert.assertTrue(deserializedValue.get("value") == null); + assertThat(deserializedValue.get("metadata")).isNull(); + assertThat(deserializedValue.get("key").get("index").asInt()).isEqualTo(4); + assertThat(deserializedValue.get("value")).isNull(); } @Test @@ -116,10 +117,10 @@ public void testDeserializeWithMetadata() throws Exception { newConsumerRecord("topic#1", 3, 4L, serializedKey, serializedValue); ObjectNode deserializedValue = schema.deserialize(consumerRecord); - Assert.assertEquals(4, deserializedValue.get("key").get("index").asInt()); - Assert.assertEquals("world", deserializedValue.get("value").get("word").asText()); - Assert.assertEquals("topic#1", deserializedValue.get("metadata").get("topic").asText()); - Assert.assertEquals(4, deserializedValue.get("metadata").get("offset").asInt()); - Assert.assertEquals(3, deserializedValue.get("metadata").get("partition").asInt()); + assertThat(deserializedValue.get("key").get("index").asInt()).isEqualTo(4); + assertThat(deserializedValue.get("value").get("word").asText()).isEqualTo("world"); + assertThat(deserializedValue.get("metadata").get("topic").asText()).isEqualTo("topic#1"); + assertThat(deserializedValue.get("metadata").get("offset").asInt()).isEqualTo(4); + assertThat(deserializedValue.get("metadata").get("partition").asInt()).isEqualTo(3); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java index 764f5d180f319..64ebbdd64040e 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaConsumerTestBase.java @@ 
-90,7 +90,6 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.NotLeaderForPartitionException; import org.apache.kafka.common.errors.TimeoutException; -import org.junit.Assert; import org.junit.Before; import javax.annotation.Nullable; @@ -119,13 +118,8 @@ import static org.apache.flink.streaming.connectors.kafka.testutils.ClusterCommunicationUtils.waitUntilNoJobIsRunning; import static org.apache.flink.test.util.TestUtils.submitJobAndWaitForResult; import static org.apache.flink.test.util.TestUtils.tryExecute; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Abstract test base for all Kafka consumer tests. */ @SuppressWarnings("serial") @@ -194,17 +188,15 @@ public void runFailOnNoBrokerTest() throws Exception { } catch (JobExecutionException jee) { final Optional optionalTimeoutException = ExceptionUtils.findThrowable(jee, TimeoutException.class); - assertTrue(optionalTimeoutException.isPresent()); + assertThat(optionalTimeoutException).isPresent(); final TimeoutException timeoutException = optionalTimeoutException.get(); if (useNewSource) { - assertThat( - timeoutException.getCause().getMessage(), - containsString("Timed out waiting for a node assignment.")); + assertThat(timeoutException.getCause().getMessage()) + .contains("Timed out waiting for a node assignment."); } else { - assertEquals( - "Timeout expired while fetching topic metadata", - timeoutException.getMessage()); + assertThat(timeoutException) + .hasMessage("Timeout expired while fetching topic metadata"); } } } @@ -278,9 +270,9 @@ public void run() { Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0); Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1); Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2); - Assert.assertEquals(Long.valueOf(50L), o1); - Assert.assertEquals(Long.valueOf(50L), o2); - Assert.assertEquals(Long.valueOf(50L), o3); + assertThat(o1).isEqualTo(Long.valueOf(50L)); + assertThat(o2).isEqualTo(Long.valueOf(50L)); + assertThat(o3).isEqualTo(Long.valueOf(50L)); kafkaOffsetHandler.close(); deleteTestTopic(topicName); @@ -369,9 +361,9 @@ public void run() { Long o1 = kafkaOffsetHandler.getCommittedOffset(topicName, 0); Long o2 = kafkaOffsetHandler.getCommittedOffset(topicName, 1); Long o3 = kafkaOffsetHandler.getCommittedOffset(topicName, 2); - Assert.assertEquals(Long.valueOf(50L), o1); - Assert.assertEquals(Long.valueOf(50L), o2); - Assert.assertEquals(Long.valueOf(50L), o3); + assertThat(o1).isEqualTo(Long.valueOf(50L)); + assertThat(o2).isEqualTo(Long.valueOf(50L)); + assertThat(o3).isEqualTo(Long.valueOf(50L)); kafkaOffsetHandler.close(); deleteTestTopic(topicName); @@ -894,19 +886,17 @@ public void cancel() { new RichSinkFunction>() { private int elCnt = 0; + private BitSet validator = new BitSet(totalElements); @Override public void invoke(Tuple2 value) throws Exception { String[] sp = value.f1.split("-"); int v = Integer.parseInt(sp[1]); - - assertEquals(value.f0 - 1000, (long) v); - - assertFalse("Received tuple twice", validator.get(v)); + assertThat((long) v).isEqualTo(value.f0 - 1000); + assertThat(validator.get(v)).as("Received tuple 
twice").isFalse(); validator.set(v); elCnt++; - if (elCnt == totalElements) { // check if everything in the bitset is set to true int nc; @@ -1149,7 +1139,7 @@ public void runCancelingOnFullInputTest() throws Exception { Throwable failueCause = jobError.get(); if (failueCause != null) { failueCause.printStackTrace(); - Assert.fail("Test failed prematurely with: " + failueCause.getMessage()); + fail("Test failed prematurely with: " + failueCause.getMessage()); } // cancel @@ -1158,7 +1148,7 @@ public void runCancelingOnFullInputTest() throws Exception { // wait for the program to be done and validate that we failed with the right exception runnerThread.join(); - assertEquals(JobStatus.CANCELED, client.getJobStatus(jobId).get()); + assertThat(client.getJobStatus(jobId).get()).isEqualTo(JobStatus.CANCELED); if (generator.isAlive()) { generator.shutdown(); @@ -1218,7 +1208,7 @@ public void runCancelingOnEmptyInputTest() throws Exception { Throwable failueCause = error.get(); if (failueCause != null) { failueCause.printStackTrace(); - Assert.fail("Test failed prematurely with: " + failueCause.getMessage()); + fail("Test failed prematurely with: " + failueCause.getMessage()); } // cancel client.cancel(jobId).get(); @@ -1226,7 +1216,7 @@ public void runCancelingOnEmptyInputTest() throws Exception { // wait for the program to be done and validate that we failed with the right exception runnerThread.join(); - assertEquals(JobStatus.CANCELED, client.getJobStatus(jobId).get()); + assertThat(client.getJobStatus(jobId).get()).isEqualTo(JobStatus.CANCELED); deleteTestTopic(topic); } @@ -1579,17 +1569,20 @@ public void cancel() {} DataStream> fromKafka = getStream(env, topic, readSchema, props); fromKafka.flatMap( new RichFlatMapFunction, Object>() { + long counter = 0; @Override public void flatMap(Tuple2 value, Collector out) throws Exception { // the elements should be in order. - Assert.assertTrue("Wrong value " + value.f1.lat, value.f1.lat == counter); + assertThat(value.f1.lat) + .as("Wrong value " + value.f1.lat) + .isEqualTo(counter); if (value.f1.lat % 2 == 0) { - assertNull("key was not null", value.f0); + assertThat(value.f0).as("key was not null").isNull(); } else { - Assert.assertTrue("Wrong value " + value.f0, value.f0 == counter); + assertThat(value.f0).as("Wrong value " + value.f0).isEqualTo(counter); } counter++; if (counter == elementCount) { @@ -1671,13 +1664,14 @@ public void cancel() {} fromKafka.flatMap( new RichFlatMapFunction, Object>() { + long counter = 0; @Override public void flatMap(Tuple2 value, Collector out) throws Exception { // ensure that deleted messages are passed as nulls - assertNull(value.f1); + assertThat(value.f1).isNull(); counter++; if (counter == elementCount) { // we got the right number of elements @@ -1888,7 +1882,7 @@ public void cancel() { offsetMetrics = mBeanServer.queryNames(new ObjectName("*current-offsets*:*"), null); Thread.sleep(50); } - Assert.assertEquals(5, offsetMetrics.size()); + assertThat(offsetMetrics).hasSize(5); // we can't rely on the consumer to have touched all the partitions already // that's why we'll wait until all five partitions have a positive offset. // The test will fail if we never meet the condition @@ -1911,7 +1905,7 @@ public void cancel() { // check if producer metrics are also available. 
Set producerMetrics = mBeanServer.queryNames(new ObjectName("*KafkaProducer*:*"), null); - Assert.assertTrue("No producer metrics found", producerMetrics.size() > 30); + assertThat(producerMetrics.size()).as("No producer metrics found").isGreaterThan(30); LOG.info("Found all JMX metrics. Cancelling job."); } finally { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java index ce7db35c5c48f..b578bc0b6d81b 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaProducerTestBase.java @@ -55,8 +55,8 @@ import java.util.Properties; import static org.apache.flink.test.util.TestUtils.tryExecute; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Abstract test base for all Kafka producer tests. */ @SuppressWarnings("serial") @@ -317,7 +317,7 @@ public int partition( byte[] serializedValue, String topic, int[] partitions) { - assertEquals(expectedTopicsToNumPartitions.get(topic).intValue(), partitions.length); + assertThat(partitions).hasSize(expectedTopicsToNumPartitions.get(topic).intValue()); return (int) (next.f0 % partitions.length); } @@ -366,7 +366,7 @@ public PartitionValidatingMapper(int numPartitions) { public Integer map(Tuple2 value) throws Exception { int partition = value.f0.intValue() % numPartitions; if (ourPartition != -1) { - assertEquals("inconsistent partitioning", ourPartition, partition); + assertThat(partition).as("inconsistent partitioning").isEqualTo(ourPartition); } else { ourPartition = partition; } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java index 201d5a2a39b88..990047d99c540 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaShortRetentionTestBase.java @@ -45,8 +45,6 @@ import java.util.Properties; import static org.apache.flink.test.util.TestUtils.tryExecute; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; /** * A class containing a special Kafka broker which has a log retention of only 250 ms. This way, we @@ -227,68 +225,4 @@ public TypeInformation getProducedType() { return Types.STRING; } } - - /** Ensure that the consumer is properly failing if "auto.offset.reset" is set to "none". 
*/ - public void runFailOnAutoOffsetResetNone() throws Exception { - final String topic = "auto-offset-reset-none-test"; - final int parallelism = 1; - - kafkaServer.createTestTopic(topic, parallelism, 1); - - final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); - env.setParallelism(parallelism); - env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately - - // ----------- add consumer ---------- - - Properties customProps = new Properties(); - customProps.putAll(standardProps); - customProps.putAll(secureProps); - customProps.setProperty( - "auto.offset.reset", "none"); // test that "none" leads to an exception - FlinkKafkaConsumerBase source = - kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps); - - DataStreamSource consuming = env.addSource(source); - consuming.addSink(new DiscardingSink()); - - try { - env.execute("Test auto offset reset none"); - } catch (Throwable e) { - // check if correct exception has been thrown - if (!e.getCause() - .getCause() - .getMessage() - .contains("Undefined offset with no reset policy for partition")) { - throw e; - } - } - - kafkaServer.deleteTestTopic(topic); - } - - public void runFailOnAutoOffsetResetNoneEager() throws Exception { - final String topic = "auto-offset-reset-none-test"; - final int parallelism = 1; - - kafkaServer.createTestTopic(topic, parallelism, 1); - - // ----------- add consumer ---------- - - Properties customProps = new Properties(); - customProps.putAll(standardProps); - customProps.putAll(secureProps); - customProps.setProperty( - "auto.offset.reset", "none"); // test that "none" leads to an exception - - try { - kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps); - fail("should fail with an exception"); - } catch (IllegalArgumentException e) { - // expected - assertTrue(e.getMessage().contains("none")); - } - - kafkaServer.deleteTestTopic(topic); - } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java index 01dc97d7465ef..37fd81dd1858c 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestBase.java @@ -58,7 +58,7 @@ import scala.concurrent.duration.FiniteDuration; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.fail; /** * The base for the Kafka tests. It brings up: diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java index adfbdce6c9557..df2824ae02d61 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/KafkaTestEnvironmentImpl.java @@ -65,7 +65,7 @@ import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.fail; /** An implementation of the KafkaServerProvider. 
*/ public class KafkaTestEnvironmentImpl extends KafkaTestEnvironment { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java index efcb8cb7bb23b..92fc08eb7ceec 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherTest.java @@ -39,8 +39,7 @@ import java.util.Optional; import java.util.Queue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for the {@link AbstractFetcher}. */ @SuppressWarnings("serial") @@ -84,8 +83,8 @@ public void onException(Throwable cause) { } }); - assertTrue(fetcher.getLastCommittedOffsets().isPresent()); - assertEquals(Collections.emptyMap(), fetcher.getLastCommittedOffsets().get()); + assertThat(fetcher.getLastCommittedOffsets()).isPresent(); + assertThat(fetcher.getLastCommittedOffsets().get()).isEmpty(); } } @@ -116,20 +115,15 @@ public void testSkipCorruptedRecord() throws Exception { emitRecord(fetcher, 1L, partitionStateHolder, 1L); emitRecord(fetcher, 2L, partitionStateHolder, 2L); - assertEquals(2L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(2L, partitionStateHolder.getOffset()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(2L); + assertThat(partitionStateHolder.getOffset()).isEqualTo(2L); // emit no records fetcher.emitRecordsWithTimestamps(emptyQueue(), partitionStateHolder, 3L, Long.MIN_VALUE); - assertEquals( - 2L, - sourceContext - .getLatestElement() - .getValue() - .longValue()); // the null record should be skipped - assertEquals( - 3L, - partitionStateHolder.getOffset()); // the offset in state still should have advanced + assertThat(sourceContext.getLatestElement().getValue().longValue()) + .isEqualTo(2L); // the null record should be skipped + assertThat(partitionStateHolder.getOffset()) + .isEqualTo(3L); // the offset in state still should have advanced } @Test diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java index e3751db580173..6cd3b64537a5e 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractFetcherWatermarksTest.java @@ -50,9 +50,7 @@ import java.util.Map; import java.util.Queue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for the watermarking behaviour of {@link AbstractFetcher}. 
*/ @SuppressWarnings("serial") @@ -114,42 +112,42 @@ public void testPeriodicWatermarks() throws Exception { emitRecord(fetcher, 1L, part1, 1L); emitRecord(fetcher, 2L, part1, 2L); emitRecord(fetcher, 3L, part1, 3L); - assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(3L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(3L); // elements for partition 2 emitRecord(fetcher, 12L, part2, 1L); - assertEquals(12L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(12L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(12L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(12L); // elements for partition 3 emitRecord(fetcher, 101L, part3, 1L); emitRecord(fetcher, 102L, part3, 2L); - assertEquals(102L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(102L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(102L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(102L); processingTimeService.setCurrentTime(10); // now, we should have a watermark (this blocks until the periodic thread emitted the // watermark) - assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp()); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(3L); // advance partition 3 emitRecord(fetcher, 1003L, part3, 3L); emitRecord(fetcher, 1004L, part3, 4L); emitRecord(fetcher, 1005L, part3, 5L); - assertEquals(1005L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(1005L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(1005L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(1005L); // advance partition 1 beyond partition 2 - this bumps the watermark emitRecord(fetcher, 30L, part1, 4L); - assertEquals(30L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(30L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(30L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(30L); processingTimeService.setCurrentTime(20); // this blocks until the periodic thread emitted the watermark - assertEquals(12L, sourceContext.getLatestWatermark().getTimestamp()); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(12L); // advance partition 2 again - this bumps the watermark emitRecord(fetcher, 13L, part2, 2L); @@ -159,7 +157,7 @@ public void testPeriodicWatermarks() throws Exception { processingTimeService.setCurrentTime(30); // this blocks until the periodic thread emitted the watermark long watermarkTs = sourceContext.getLatestWatermark().getTimestamp(); - assertTrue(watermarkTs >= 13L && watermarkTs <= 15L); + assertThat(watermarkTs >= 13L && watermarkTs <= 15L).isTrue(); } @Test @@ -189,28 +187,28 @@ public void testSkipCorruptedRecordWithPeriodicWatermarks() throws Exception { emitRecord(fetcher, 1L, partitionStateHolder, 1L); emitRecord(fetcher, 2L, partitionStateHolder, 2L); emitRecord(fetcher, 3L, partitionStateHolder, 3L); - assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); - 
assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); - assertEquals(3L, partitionStateHolder.getOffset()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(3L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(3L); + assertThat(partitionStateHolder.getOffset()).isEqualTo(3L); // advance timer for watermark emitting processingTimeProvider.setCurrentTime(10L); - assertTrue(sourceContext.hasWatermark()); - assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp()); + assertThat(sourceContext.hasWatermark()).isTrue(); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(3L); // emit no records fetcher.emitRecordsWithTimestamps( emptyQueue(), partitionStateHolder, 4L, Long.MIN_VALUE); // no elements should have been collected - assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(3L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(3L); // the offset in state still should have advanced - assertEquals(4L, partitionStateHolder.getOffset()); + assertThat(partitionStateHolder.getOffset()).isEqualTo(4L); // no watermarks should be collected processingTimeProvider.setCurrentTime(20L); - assertFalse(sourceContext.hasWatermark()); + assertThat(sourceContext.hasWatermark()).isFalse(); } @Test @@ -234,7 +232,7 @@ public void testPeriodicWatermarksWithNoSubscribedPartitionsShouldYieldNoWaterma processingTimeProvider.setCurrentTime(10); // no partitions; when the periodic watermark emitter fires, no watermark should be // emitted - assertFalse(sourceContext.hasWatermark()); + assertThat(sourceContext.hasWatermark()).isFalse(); // counter-test that when the fetcher does actually have partitions, // when the periodic watermark emitter fires again, a watermark really is emitted @@ -242,7 +240,7 @@ public void testPeriodicWatermarksWithNoSubscribedPartitionsShouldYieldNoWaterma Collections.singletonList(new KafkaTopicPartition(testTopic, 0))); emitRecord(fetcher, 100L, fetcher.subscribedPartitionStates().get(0), 3L); processingTimeProvider.setCurrentTime(20); - assertEquals(100, sourceContext.getLatestWatermark().getTimestamp()); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(100); } } @@ -280,21 +278,21 @@ public void testSkipCorruptedRecordWithPunctuatedWatermarks() throws Exception { emitRecord(fetcher, 1L, partitionStateHolder, 1L); emitRecord(fetcher, 2L, partitionStateHolder, 2L); emitRecord(fetcher, 3L, partitionStateHolder, 3L); - assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); - assertTrue(sourceContext.hasWatermark()); - assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp()); - assertEquals(3L, partitionStateHolder.getOffset()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(3L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(3L); + assertThat(sourceContext.hasWatermark()).isTrue(); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(3L); + assertThat(partitionStateHolder.getOffset()).isEqualTo(3L); // emit no records fetcher.emitRecordsWithTimestamps(emptyQueue(), partitionStateHolder, 4L, -1L); // no elements or watermarks should have been collected - assertEquals(3L, 
sourceContext.getLatestElement().getValue().longValue()); - assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); - assertFalse(sourceContext.hasWatermark()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(3L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(3L); + assertThat(sourceContext.hasWatermark()).isFalse(); // the offset in state still should have advanced - assertEquals(4L, partitionStateHolder.getOffset()); + assertThat(partitionStateHolder.getOffset()).isEqualTo(4L); } @Test @@ -340,48 +338,48 @@ public void testPunctuatedWatermarks() throws Exception { emitRecords(fetcher, Arrays.asList(1L, 2L), part1, 1L); emitRecord(fetcher, 2L, part1, 2L); emitRecords(fetcher, Arrays.asList(2L, 3L), part1, 3L); - assertEquals(3L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(3L, sourceContext.getLatestElement().getTimestamp()); - assertFalse(sourceContext.hasWatermark()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(3L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(3L); + assertThat(sourceContext.hasWatermark()).isFalse(); // elements for partition 2 emitRecord(fetcher, 12L, part2, 1L); - assertEquals(12L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(12L, sourceContext.getLatestElement().getTimestamp()); - assertFalse(sourceContext.hasWatermark()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(12L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(12L); + assertThat(sourceContext.hasWatermark()).isFalse(); // elements for partition 3 emitRecord(fetcher, 101L, part3, 1L); emitRecord(fetcher, 102L, part3, 2L); - assertEquals(102L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(102L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(102L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(102L); // now, we should have a watermark - assertTrue(sourceContext.hasWatermark()); - assertEquals(3L, sourceContext.getLatestWatermark().getTimestamp()); + assertThat(sourceContext.hasWatermark()).isTrue(); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(3L); // advance partition 3 emitRecord(fetcher, 1003L, part3, 3L); emitRecord(fetcher, 1004L, part3, 4L); emitRecord(fetcher, 1005L, part3, 5L); - assertEquals(1005L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(1005L, sourceContext.getLatestElement().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(1005L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(1005L); // advance partition 1 beyond partition 2 - this bumps the watermark emitRecord(fetcher, 30L, part1, 4L); - assertEquals(30L, sourceContext.getLatestElement().getValue().longValue()); - assertEquals(30L, sourceContext.getLatestElement().getTimestamp()); - assertTrue(sourceContext.hasWatermark()); - assertEquals(12L, sourceContext.getLatestWatermark().getTimestamp()); + assertThat(sourceContext.getLatestElement().getValue().longValue()).isEqualTo(30L); + assertThat(sourceContext.getLatestElement().getTimestamp()).isEqualTo(30L); + assertThat(sourceContext.hasWatermark()).isTrue(); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(12L); // advance partition 2 again - this bumps the 
watermark emitRecord(fetcher, 13L, part2, 2L); - assertFalse(sourceContext.hasWatermark()); + assertThat(sourceContext.hasWatermark()).isFalse(); emitRecord(fetcher, 14L, part2, 3L); - assertFalse(sourceContext.hasWatermark()); + assertThat(sourceContext.hasWatermark()).isFalse(); emitRecord(fetcher, 15L, part2, 3L); - assertTrue(sourceContext.hasWatermark()); - assertEquals(15L, sourceContext.getLatestWatermark().getTimestamp()); + assertThat(sourceContext.hasWatermark()).isTrue(); + assertThat(sourceContext.getLatestWatermark().getTimestamp()).isEqualTo(15L); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java index 7c9e4b2b430e3..e8ea58b1613a9 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/AbstractPartitionDiscovererTest.java @@ -33,9 +33,8 @@ import java.util.Set; import java.util.regex.Pattern; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** * Tests that the partition assignment in the partition discoverer is deterministic and stable, with @@ -98,20 +97,22 @@ public void testPartitionsEqualConsumersFixedPartitions() throws Exception { partitionDiscoverer.open(); List initialDiscovery = partitionDiscoverer.discoverPartitions(); - assertEquals(1, initialDiscovery.size()); - assertTrue( - contains( - mockGetAllPartitionsForTopicsReturn, - initialDiscovery.get(0).getPartition())); - assertEquals( - getExpectedSubtaskIndex(initialDiscovery.get(0), numConsumers, numSubtasks), - subtaskIndex); + assertThat(initialDiscovery).hasSize(1); + assertThat( + contains( + mockGetAllPartitionsForTopicsReturn, + initialDiscovery.get(0).getPartition())) + .isTrue(); + assertThat(subtaskIndex) + .isEqualTo( + getExpectedSubtaskIndex( + initialDiscovery.get(0), numConsumers, numSubtasks)); // subsequent discoveries should not find anything List secondDiscovery = partitionDiscoverer.discoverPartitions(); List thirdDiscovery = partitionDiscoverer.discoverPartitions(); - assertEquals(0, secondDiscovery.size()); - assertEquals(0, thirdDiscovery.size()); + assertThat(secondDiscovery).isEmpty(); + assertThat(thirdDiscovery).isEmpty(); } } @@ -157,26 +158,27 @@ public void testMultiplePartitionsPerConsumersFixedPartitions() { List initialDiscovery = partitionDiscoverer.discoverPartitions(); - assertTrue(initialDiscovery.size() >= minPartitionsPerConsumer); - assertTrue(initialDiscovery.size() <= maxPartitionsPerConsumer); + assertThat(initialDiscovery.size()) + .isGreaterThanOrEqualTo(minPartitionsPerConsumer); + assertThat(initialDiscovery.size()).isLessThanOrEqualTo(maxPartitionsPerConsumer); for (KafkaTopicPartition p : initialDiscovery) { // check that the element was actually contained - assertTrue(allPartitions.remove(p)); - assertEquals( - getExpectedSubtaskIndex(p, startIndex, numConsumers), subtaskIndex); + assertThat(allPartitions.remove(p)).isTrue(); + assertThat(subtaskIndex) + .isEqualTo(getExpectedSubtaskIndex(p, startIndex, 
numConsumers)); } // subsequent discoveries should not find anything List secondDiscovery = partitionDiscoverer.discoverPartitions(); List thirdDiscovery = partitionDiscoverer.discoverPartitions(); - assertEquals(0, secondDiscovery.size()); - assertEquals(0, thirdDiscovery.size()); + assertThat(secondDiscovery).isEmpty(); + assertThat(thirdDiscovery).isEmpty(); } // all partitions must have been assigned - assertTrue(allPartitions.isEmpty()); + assertThat(allPartitions).isEmpty(); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -220,25 +222,25 @@ public void testPartitionsFewerThanConsumersFixedPartitions() { List initialDiscovery = partitionDiscoverer.discoverPartitions(); - assertTrue(initialDiscovery.size() <= 1); + assertThat(initialDiscovery.size()).isLessThanOrEqualTo(1); for (KafkaTopicPartition p : initialDiscovery) { // check that the element was actually contained - assertTrue(allPartitions.remove(p)); - assertEquals( - getExpectedSubtaskIndex(p, startIndex, numConsumers), subtaskIndex); + assertThat(allPartitions.remove(p)).isTrue(); + assertThat(subtaskIndex) + .isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } // subsequent discoveries should not find anything List secondDiscovery = partitionDiscoverer.discoverPartitions(); List thirdDiscovery = partitionDiscoverer.discoverPartitions(); - assertEquals(0, secondDiscovery.size()); - assertEquals(0, thirdDiscovery.size()); + assertThat(secondDiscovery).isEmpty(); + assertThat(thirdDiscovery).isEmpty(); } // all partitions must have been assigned - assertTrue(allPartitions.isEmpty()); + assertThat(allPartitions).isEmpty(); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -314,33 +316,39 @@ public void testGrowingPartitions() { List initialDiscoverySubtask2 = partitionDiscovererSubtask2.discoverPartitions(); - assertTrue(initialDiscoverySubtask0.size() >= minInitialPartitionsPerConsumer); - assertTrue(initialDiscoverySubtask0.size() <= maxInitialPartitionsPerConsumer); - assertTrue(initialDiscoverySubtask1.size() >= minInitialPartitionsPerConsumer); - assertTrue(initialDiscoverySubtask1.size() <= maxInitialPartitionsPerConsumer); - assertTrue(initialDiscoverySubtask2.size() >= minInitialPartitionsPerConsumer); - assertTrue(initialDiscoverySubtask2.size() <= maxInitialPartitionsPerConsumer); + assertThat(initialDiscoverySubtask0.size()) + .isGreaterThanOrEqualTo(minInitialPartitionsPerConsumer); + assertThat(initialDiscoverySubtask0.size()) + .isLessThanOrEqualTo(maxInitialPartitionsPerConsumer); + assertThat(initialDiscoverySubtask1.size()) + .isGreaterThanOrEqualTo(minInitialPartitionsPerConsumer); + assertThat(initialDiscoverySubtask1.size()) + .isLessThanOrEqualTo(maxInitialPartitionsPerConsumer); + assertThat(initialDiscoverySubtask2.size()) + .isGreaterThanOrEqualTo(minInitialPartitionsPerConsumer); + assertThat(initialDiscoverySubtask2.size()) + .isLessThanOrEqualTo(maxInitialPartitionsPerConsumer); for (KafkaTopicPartition p : initialDiscoverySubtask0) { // check that the element was actually contained - assertTrue(allInitialPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 0); + assertThat(allInitialPartitions.remove(p)).isTrue(); + assertThat(0).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } for (KafkaTopicPartition p : initialDiscoverySubtask1) { // check that the element was actually contained - assertTrue(allInitialPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, 
numConsumers), 1); + assertThat(allInitialPartitions.remove(p)).isTrue(); + assertThat(1).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } for (KafkaTopicPartition p : initialDiscoverySubtask2) { // check that the element was actually contained - assertTrue(allInitialPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 2); + assertThat(allInitialPartitions.remove(p)).isTrue(); + assertThat(2).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } // all partitions must have been assigned - assertTrue(allInitialPartitions.isEmpty()); + assertThat(allInitialPartitions).isEmpty(); // now, execute discover again (should find the extra new partitions) List secondDiscoverySubtask0 = @@ -351,63 +359,60 @@ public void testGrowingPartitions() { partitionDiscovererSubtask2.discoverPartitions(); // new discovered partitions must not have been discovered before - assertTrue(Collections.disjoint(secondDiscoverySubtask0, initialDiscoverySubtask0)); - assertTrue(Collections.disjoint(secondDiscoverySubtask1, initialDiscoverySubtask1)); - assertTrue(Collections.disjoint(secondDiscoverySubtask2, initialDiscoverySubtask2)); - - assertTrue( - secondDiscoverySubtask0.size() + initialDiscoverySubtask0.size() - >= minNewPartitionsPerConsumer); - assertTrue( - secondDiscoverySubtask0.size() + initialDiscoverySubtask0.size() - <= maxNewPartitionsPerConsumer); - assertTrue( - secondDiscoverySubtask1.size() + initialDiscoverySubtask1.size() - >= minNewPartitionsPerConsumer); - assertTrue( - secondDiscoverySubtask1.size() + initialDiscoverySubtask1.size() - <= maxNewPartitionsPerConsumer); - assertTrue( - secondDiscoverySubtask2.size() + initialDiscoverySubtask2.size() - >= minNewPartitionsPerConsumer); - assertTrue( - secondDiscoverySubtask2.size() + initialDiscoverySubtask2.size() - <= maxNewPartitionsPerConsumer); + assertThat(Collections.disjoint(secondDiscoverySubtask0, initialDiscoverySubtask0)) + .isTrue(); + assertThat(Collections.disjoint(secondDiscoverySubtask1, initialDiscoverySubtask1)) + .isTrue(); + assertThat(Collections.disjoint(secondDiscoverySubtask2, initialDiscoverySubtask2)) + .isTrue(); + + assertThat(secondDiscoverySubtask0.size() + initialDiscoverySubtask0.size()) + .isGreaterThanOrEqualTo(minNewPartitionsPerConsumer); + assertThat(secondDiscoverySubtask0.size() + initialDiscoverySubtask0.size()) + .isLessThanOrEqualTo(maxNewPartitionsPerConsumer); + assertThat(secondDiscoverySubtask1.size() + initialDiscoverySubtask1.size()) + .isGreaterThanOrEqualTo(minNewPartitionsPerConsumer); + assertThat(secondDiscoverySubtask1.size() + initialDiscoverySubtask1.size()) + .isLessThanOrEqualTo(maxNewPartitionsPerConsumer); + assertThat(secondDiscoverySubtask2.size() + initialDiscoverySubtask2.size()) + .isGreaterThanOrEqualTo(minNewPartitionsPerConsumer); + assertThat(secondDiscoverySubtask2.size() + initialDiscoverySubtask2.size()) + .isLessThanOrEqualTo(maxNewPartitionsPerConsumer); // check that the two discoveries combined form all partitions for (KafkaTopicPartition p : initialDiscoverySubtask0) { - assertTrue(allNewPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 0); + assertThat(allNewPartitions.remove(p)).isTrue(); + assertThat(0).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } for (KafkaTopicPartition p : initialDiscoverySubtask1) { - assertTrue(allNewPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 1); + 
assertThat(allNewPartitions.remove(p)).isTrue(); + assertThat(1).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } for (KafkaTopicPartition p : initialDiscoverySubtask2) { - assertTrue(allNewPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 2); + assertThat(allNewPartitions.remove(p)).isTrue(); + assertThat(2).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } for (KafkaTopicPartition p : secondDiscoverySubtask0) { - assertTrue(allNewPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 0); + assertThat(allNewPartitions.remove(p)).isTrue(); + assertThat(0).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } for (KafkaTopicPartition p : secondDiscoverySubtask1) { - assertTrue(allNewPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 1); + assertThat(allNewPartitions.remove(p)).isTrue(); + assertThat(1).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } for (KafkaTopicPartition p : secondDiscoverySubtask2) { - assertTrue(allNewPartitions.remove(p)); - assertEquals(getExpectedSubtaskIndex(p, startIndex, numConsumers), 2); + assertThat(allNewPartitions.remove(p)).isTrue(); + assertThat(2).isEqualTo(getExpectedSubtaskIndex(p, startIndex, numConsumers)); } // all partitions must have been assigned - assertTrue(allNewPartitions.isEmpty()); + assertThat(allNewPartitions).isEmpty(); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -471,7 +476,7 @@ public void testDeterministicAssignmentWithDifferentFetchedPartitionOrdering() // ordering Collections.sort(discoveredPartitions, new KafkaTopicPartition.Comparator()); Collections.sort(discoveredPartitionsOutOfOrder, new KafkaTopicPartition.Comparator()); - assertEquals(discoveredPartitions, discoveredPartitionsOutOfOrder); + assertThat(discoveredPartitionsOutOfOrder).isEqualTo(discoveredPartitions); } } @@ -507,15 +512,15 @@ public void testNonContiguousPartitionIdDiscovery() throws Exception { partitionDiscoverer.open(); List discoveredPartitions1 = partitionDiscoverer.discoverPartitions(); - assertEquals(2, discoveredPartitions1.size()); - assertTrue(discoveredPartitions1.contains(new KafkaTopicPartition("test-topic", 1))); - assertTrue(discoveredPartitions1.contains(new KafkaTopicPartition("test-topic", 4))); + assertThat(discoveredPartitions1).hasSize(2); + assertThat(discoveredPartitions1).contains(new KafkaTopicPartition("test-topic", 1)); + assertThat(discoveredPartitions1).contains(new KafkaTopicPartition("test-topic", 4)); List discoveredPartitions2 = partitionDiscoverer.discoverPartitions(); - assertEquals(3, discoveredPartitions2.size()); - assertTrue(discoveredPartitions2.contains(new KafkaTopicPartition("test-topic", 0))); - assertTrue(discoveredPartitions2.contains(new KafkaTopicPartition("test-topic", 2))); - assertTrue(discoveredPartitions2.contains(new KafkaTopicPartition("test-topic", 3))); + assertThat(discoveredPartitions2).hasSize(3); + assertThat(discoveredPartitions2).contains(new KafkaTopicPartition("test-topic", 0)); + assertThat(discoveredPartitions2).contains(new KafkaTopicPartition("test-topic", 2)); + assertThat(discoveredPartitions2).contains(new KafkaTopicPartition("test-topic", 3)); } private boolean contains(List partitions, int partition) { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java 
b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java index 3adc3ffaaeb64..8697b1486a25a 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/ClosableBlockingQueueTest.java @@ -26,14 +26,8 @@ import java.util.concurrent.atomic.AtomicReference; import static java.util.Arrays.asList; -import static java.util.Collections.emptyList; -import static java.util.Collections.singletonList; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests for the {@link ClosableBlockingQueue}. */ public class ClosableBlockingQueueTest { @@ -48,22 +42,22 @@ public void testCreateQueueHashCodeEquals() { ClosableBlockingQueue queue1 = new ClosableBlockingQueue<>(); ClosableBlockingQueue queue2 = new ClosableBlockingQueue<>(22); - assertTrue(queue1.isOpen()); - assertTrue(queue2.isOpen()); - assertTrue(queue1.isEmpty()); - assertTrue(queue2.isEmpty()); - assertEquals(0, queue1.size()); - assertEquals(0, queue2.size()); + assertThat(queue1.isOpen()).isTrue(); + assertThat(queue2.isOpen()).isTrue(); + assertThat(queue1.isEmpty()).isTrue(); + assertThat(queue2.isEmpty()).isTrue(); + assertThat(queue1.size()).isEqualTo(0); + assertThat(queue2.size()).isEqualTo(0); - assertTrue(queue1.hashCode() == queue2.hashCode()); + assertThat(queue1.hashCode()).isEqualTo(queue2.hashCode()); //noinspection EqualsWithItself - assertTrue(queue1.equals(queue1)); + assertThat(queue1.equals(queue1)).isTrue(); //noinspection EqualsWithItself - assertTrue(queue2.equals(queue2)); - assertTrue(queue1.equals(queue2)); + assertThat(queue2.equals(queue2)).isTrue(); + assertThat(queue1.equals(queue2)).isTrue(); - assertNotNull(queue1.toString()); - assertNotNull(queue2.toString()); + assertThat(queue1.toString()).isNotNull(); + assertThat(queue2.toString()).isNotNull(); List elements = new ArrayList<>(); elements.add("a"); @@ -74,22 +68,22 @@ public void testCreateQueueHashCodeEquals() { ClosableBlockingQueue queue4 = new ClosableBlockingQueue<>(asList("a", "b", "c")); - assertTrue(queue3.isOpen()); - assertTrue(queue4.isOpen()); - assertFalse(queue3.isEmpty()); - assertFalse(queue4.isEmpty()); - assertEquals(3, queue3.size()); - assertEquals(3, queue4.size()); + assertThat(queue3.isOpen()).isTrue(); + assertThat(queue4.isOpen()).isTrue(); + assertThat(queue3.isEmpty()).isFalse(); + assertThat(queue4.isEmpty()).isFalse(); + assertThat(queue3.size()).isEqualTo(3); + assertThat(queue4.size()).isEqualTo(3); - assertTrue(queue3.hashCode() == queue4.hashCode()); + assertThat(queue3.hashCode()).isEqualTo(queue4.hashCode()); //noinspection EqualsWithItself - assertTrue(queue3.equals(queue3)); + assertThat(queue3.equals(queue3)).isTrue(); //noinspection EqualsWithItself - assertTrue(queue4.equals(queue4)); - assertTrue(queue3.equals(queue4)); + assertThat(queue4.equals(queue4)).isTrue(); + assertThat(queue3.equals(queue4)).isTrue(); - assertNotNull(queue3.toString()); - assertNotNull(queue4.toString()); + 
assertThat(queue3.toString()).isNotNull(); + assertThat(queue4.toString()).isNotNull(); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -100,12 +94,12 @@ public void testCreateQueueHashCodeEquals() { public void testCloseEmptyQueue() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); - assertTrue(queue.isOpen()); - assertTrue(queue.close()); - assertFalse(queue.isOpen()); + assertThat(queue.isOpen()).isTrue(); + assertThat(queue.close()).isTrue(); + assertThat(queue.isOpen()).isFalse(); - assertFalse(queue.addIfOpen("element")); - assertTrue(queue.isEmpty()); + assertThat(queue.addIfOpen("element")).isFalse(); + assertThat(queue.isEmpty()).isTrue(); try { queue.add("some element"); @@ -123,23 +117,23 @@ public void testCloseEmptyQueue() { public void testCloseNonEmptyQueue() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(asList(1, 2, 3)); - assertTrue(queue.isOpen()); + assertThat(queue.isOpen()).isTrue(); - assertFalse(queue.close()); - assertFalse(queue.close()); + assertThat(queue.close()).isFalse(); + assertThat(queue.close()).isFalse(); queue.poll(); - assertFalse(queue.close()); - assertFalse(queue.close()); + assertThat(queue.close()).isFalse(); + assertThat(queue.close()).isFalse(); queue.pollBatch(); - assertTrue(queue.close()); - assertFalse(queue.isOpen()); + assertThat(queue.close()).isTrue(); + assertThat(queue.isOpen()).isFalse(); - assertFalse(queue.addIfOpen(42)); - assertTrue(queue.isEmpty()); + assertThat(queue.addIfOpen(42)).isFalse(); + assertThat(queue.isEmpty()).isTrue(); try { queue.add(99); @@ -158,41 +152,41 @@ public void testPeekAndPoll() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); - assertNull(queue.peek()); - assertNull(queue.peek()); - assertNull(queue.poll()); - assertNull(queue.poll()); + assertThat(queue.peek()).isNull(); + assertThat(queue.peek()).isNull(); + assertThat(queue.poll()).isNull(); + assertThat(queue.poll()).isNull(); - assertEquals(0, queue.size()); + assertThat(queue.size()).isEqualTo(0); queue.add("a"); queue.add("b"); queue.add("c"); - assertEquals(3, queue.size()); + assertThat(queue.size()).isEqualTo(3); - assertEquals("a", queue.peek()); - assertEquals("a", queue.peek()); - assertEquals("a", queue.peek()); + assertThat(queue.peek()).isEqualTo("a"); + assertThat(queue.peek()).isEqualTo("a"); + assertThat(queue.peek()).isEqualTo("a"); - assertEquals(3, queue.size()); + assertThat(queue.size()).isEqualTo(3); - assertEquals("a", queue.poll()); - assertEquals("b", queue.poll()); + assertThat(queue.poll()).isEqualTo("a"); + assertThat(queue.poll()).isEqualTo("b"); - assertEquals(1, queue.size()); + assertThat(queue.size()).isEqualTo(1); - assertEquals("c", queue.peek()); - assertEquals("c", queue.peek()); + assertThat(queue.peek()).isEqualTo("c"); + assertThat(queue.peek()).isEqualTo("c"); - assertEquals("c", queue.poll()); + assertThat(queue.poll()).isEqualTo("c"); - assertEquals(0, queue.size()); - assertNull(queue.poll()); - assertNull(queue.peek()); - assertNull(queue.peek()); + assertThat(queue.size()).isEqualTo(0); + assertThat(queue.poll()).isNull(); + assertThat(queue.peek()).isNull(); + assertThat(queue.peek()).isNull(); - assertTrue(queue.close()); + assertThat(queue.close()).isTrue(); try { queue.peek(); @@ -218,20 +212,20 @@ public void testPollBatch() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); - assertNull(queue.pollBatch()); + assertThat(queue.pollBatch()).isNull(); queue.add("a"); queue.add("b"); - 
assertEquals(asList("a", "b"), queue.pollBatch()); - assertNull(queue.pollBatch()); + assertThat(queue.pollBatch()).isEqualTo(asList("a", "b")); + assertThat(queue.pollBatch()).isNull(); queue.add("c"); - assertEquals(singletonList("c"), queue.pollBatch()); - assertNull(queue.pollBatch()); + assertThat(queue.pollBatch()).containsExactly("c"); + assertThat(queue.pollBatch()).isNull(); - assertTrue(queue.close()); + assertThat(queue.close()).isTrue(); try { queue.pollBatch(); @@ -250,11 +244,11 @@ public void testGetElementBlocking() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); - assertNull(queue.getElementBlocking(1)); - assertNull(queue.getElementBlocking(3)); - assertNull(queue.getElementBlocking(2)); + assertThat(queue.getElementBlocking(1)).isNull(); + assertThat(queue.getElementBlocking(3)).isNull(); + assertThat(queue.getElementBlocking(2)).isNull(); - assertEquals(0, queue.size()); + assertThat(queue.size()).isEqualTo(0); queue.add("a"); queue.add("b"); @@ -263,25 +257,25 @@ public void testGetElementBlocking() { queue.add("e"); queue.add("f"); - assertEquals(6, queue.size()); + assertThat(queue.size()).isEqualTo(6); - assertEquals("a", queue.getElementBlocking(99)); - assertEquals("b", queue.getElementBlocking()); + assertThat(queue.getElementBlocking(99)).isEqualTo("a"); + assertThat(queue.getElementBlocking()).isEqualTo("b"); - assertEquals(4, queue.size()); + assertThat(queue.size()).isEqualTo(4); - assertEquals("c", queue.getElementBlocking(0)); - assertEquals("d", queue.getElementBlocking(1000000)); - assertEquals("e", queue.getElementBlocking()); - assertEquals("f", queue.getElementBlocking(1786598)); + assertThat(queue.getElementBlocking(0)).isEqualTo("c"); + assertThat(queue.getElementBlocking(1000000)).isEqualTo("d"); + assertThat(queue.getElementBlocking()).isEqualTo("e"); + assertThat(queue.getElementBlocking(1786598)).isEqualTo("f"); - assertEquals(0, queue.size()); + assertThat(queue.size()).isEqualTo(0); - assertNull(queue.getElementBlocking(1)); - assertNull(queue.getElementBlocking(3)); - assertNull(queue.getElementBlocking(2)); + assertThat(queue.getElementBlocking(1)).isNull(); + assertThat(queue.getElementBlocking(3)).isNull(); + assertThat(queue.getElementBlocking(2)).isNull(); - assertTrue(queue.close()); + assertThat(queue.close()).isTrue(); try { queue.getElementBlocking(); @@ -307,37 +301,37 @@ public void testGetBatchBlocking() { try { ClosableBlockingQueue queue = new ClosableBlockingQueue<>(); - assertEquals(emptyList(), queue.getBatchBlocking(1)); - assertEquals(emptyList(), queue.getBatchBlocking(3)); - assertEquals(emptyList(), queue.getBatchBlocking(2)); + assertThat(queue.getBatchBlocking(1)).isEmpty(); + assertThat(queue.getBatchBlocking(3)).isEmpty(); + assertThat(queue.getBatchBlocking(2)).isEmpty(); queue.add("a"); queue.add("b"); - assertEquals(asList("a", "b"), queue.getBatchBlocking(900000009)); + assertThat(queue.getBatchBlocking(900000009)).isEqualTo(asList("a", "b")); queue.add("c"); queue.add("d"); - assertEquals(asList("c", "d"), queue.getBatchBlocking()); + assertThat(queue.getBatchBlocking()).isEqualTo(asList("c", "d")); - assertEquals(emptyList(), queue.getBatchBlocking(2)); + assertThat(queue.getBatchBlocking(2)).isEmpty(); queue.add("e"); - assertEquals(singletonList("e"), queue.getBatchBlocking(0)); + assertThat(queue.getBatchBlocking(0)).containsExactly("e"); queue.add("f"); - assertEquals(singletonList("f"), queue.getBatchBlocking(1000000000)); + 
assertThat(queue.getBatchBlocking(1000000000)).containsExactly("f"); - assertEquals(0, queue.size()); + assertThat(queue.size()).isEqualTo(0); - assertEquals(emptyList(), queue.getBatchBlocking(1)); - assertEquals(emptyList(), queue.getBatchBlocking(3)); - assertEquals(emptyList(), queue.getBatchBlocking(2)); + assertThat(queue.getBatchBlocking(1)).isEmpty(); + assertThat(queue.getBatchBlocking(3)).isEmpty(); + assertThat(queue.getBatchBlocking(2)).isEmpty(); - assertTrue(queue.close()); + assertThat(queue.close()).isTrue(); try { queue.getBatchBlocking(); @@ -477,7 +471,8 @@ public void run() { { Integer next = queue.getElementBlocking(1); if (next != null) { - assertEquals(nextExpected, next.intValue()); + assertThat(next.intValue()) + .isEqualTo(nextExpected); nextExpected++; count++; } @@ -488,8 +483,9 @@ public void run() { List nextList = queue.getBatchBlocking(); for (Integer next : nextList) { - assertNotNull(next); - assertEquals(nextExpected, next.intValue()); + assertThat(next).isNotNull(); + assertThat(next.intValue()) + .isEqualTo(nextExpected); nextExpected++; count++; } @@ -501,9 +497,9 @@ public void run() { queue.getBatchBlocking(1); if (nextList != null) { for (Integer next : nextList) { - assertNotNull(next); - assertEquals( - nextExpected, next.intValue()); + assertThat(next).isNotNull(); + assertThat(next.intValue()) + .isEqualTo(nextExpected); nextExpected++; count++; } @@ -514,7 +510,8 @@ public void run() { { Integer next = queue.poll(); if (next != null) { - assertEquals(nextExpected, next.intValue()); + assertThat(next.intValue()) + .isEqualTo(nextExpected); nextExpected++; count++; } @@ -525,9 +522,9 @@ public void run() { List nextList = queue.pollBatch(); if (nextList != null) { for (Integer next : nextList) { - assertNotNull(next); - assertEquals( - nextExpected, next.intValue()); + assertThat(next).isNotNull(); + assertThat(next.intValue()) + .isEqualTo(nextExpected); nextExpected++; count++; } @@ -537,8 +534,9 @@ public void run() { default: { Integer next = queue.getElementBlocking(); - assertNotNull(next); - assertEquals(nextExpected, next.intValue()); + assertThat(next).isNotNull(); + assertThat(next.intValue()) + .isEqualTo(nextExpected); nextExpected++; count++; } @@ -552,7 +550,7 @@ public void run() { } } catch (IllegalStateException e) { // we get this once the queue is closed - assertEquals(numElements, count); + assertThat(count).isEqualTo(numElements); } } catch (Throwable t) { pollErrorRef.set(t); @@ -609,7 +607,7 @@ public void run() { @SuppressWarnings("ThrowableResultOfMethodCallIgnored") Throwable cause = errorRef.get(); - assertTrue(cause instanceof IllegalStateException); + assertThat(cause).isInstanceOf(IllegalStateException.class); } private interface QueueCall { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java index b723da48e5977..30e651691fc7d 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicPartitionTest.java @@ -23,9 +23,8 @@ import java.lang.reflect.Field; import java.lang.reflect.Modifier; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; 
-import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests for the {@link KafkaTopicPartition}. */ public class KafkaTopicPartitionTest { @@ -41,15 +40,15 @@ public void validateUid() { return; } - assertTrue(Modifier.isStatic(uidField.getModifiers())); - assertTrue(Modifier.isFinal(uidField.getModifiers())); - assertTrue(Modifier.isPrivate(uidField.getModifiers())); + assertThat(Modifier.isStatic(uidField.getModifiers())).isTrue(); + assertThat(Modifier.isFinal(uidField.getModifiers())).isTrue(); + assertThat(Modifier.isPrivate(uidField.getModifiers())).isTrue(); - assertEquals(long.class, uidField.getType()); + assertThat(uidField.getType()).isEqualTo(long.class); // the UID has to be constant to make sure old checkpoints/savepoints can be read try { - assertEquals(722083576322742325L, uidField.getLong(null)); + assertThat(uidField.getLong(null)).isEqualTo(722083576322742325L); } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java index 07323dd9ee5dd..bb029d85e3cb7 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/internals/KafkaTopicsDescriptorTest.java @@ -17,7 +17,6 @@ package org.apache.flink.streaming.connectors.kafka.internals; -import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -27,6 +26,8 @@ import java.util.List; import java.util.regex.Pattern; +import static org.assertj.core.api.Assertions.assertThat; + /** Tests for the {@link KafkaTopicsDescriptor}. 
*/ @RunWith(Parameterized.class) public class KafkaTopicsDescriptorTest { @@ -60,6 +61,6 @@ public void testIsMatchingTopic() { KafkaTopicsDescriptor topicsDescriptor = new KafkaTopicsDescriptor(fixedTopics, topicPattern); - Assert.assertEquals(expected, topicsDescriptor.isMatchingTopic(topic)); + assertThat(topicsDescriptor.isMatchingTopic(topic)).isEqualTo(expected); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java index 31b34ea4109c3..50b61af5eed5c 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/shuffle/KafkaShuffleITCase.java @@ -55,10 +55,8 @@ import static org.apache.flink.streaming.connectors.kafka.shuffle.FlinkKafkaShuffle.PARTITION_NUMBER; import static org.apache.flink.streaming.connectors.kafka.shuffle.FlinkKafkaShuffle.PRODUCER_PARALLELISM; import static org.apache.flink.test.util.TestUtils.tryExecute; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Simple End to End Test for Kafka. */ public class KafkaShuffleITCase extends KafkaShuffleTestBase { @@ -199,15 +197,14 @@ public void testWatermarkBroadcasting() throws Exception { Map> watermarks = new HashMap<>(); for (ConsumerRecord consumerRecord : records) { - assertNull(consumerRecord.key()); + assertThat(consumerRecord.key()).isNull(); KafkaShuffleElement element = deserializer.deserialize(consumerRecord); if (element.isRecord()) { KafkaShuffleRecord> record = element.asRecord(); - assertEquals( - record.getValue().f1.longValue(), - INIT_TIMESTAMP + record.getValue().f0); - assertEquals( - record.getTimestamp().longValue(), record.getValue().f1.longValue()); + assertThat(INIT_TIMESTAMP + record.getValue().f0) + .isEqualTo(record.getValue().f1.longValue()); + assertThat(record.getValue().f1.longValue()) + .isEqualTo(record.getTimestamp().longValue()); } else if (element.isWatermark()) { KafkaShuffleWatermark watermark = element.asWatermark(); watermarks.computeIfAbsent(watermark.getSubtask(), k -> new ArrayList<>()); @@ -231,14 +228,14 @@ public void testWatermarkBroadcasting() throws Exception { // Besides, watermarks from the same producer sub task should keep in order. 
for (List subTaskWatermarks : watermarks.values()) { int index = 0; - assertEquals(numElementsPerProducer + 1, subTaskWatermarks.size()); + assertThat(subTaskWatermarks).hasSize(numElementsPerProducer + 1); for (KafkaShuffleWatermark watermark : subTaskWatermarks) { if (index == numElementsPerProducer) { // the last element is the watermark that signifies end-of-event-time - assertEquals( - watermark.getWatermark(), Watermark.MAX_WATERMARK.getTimestamp()); + assertThat(Watermark.MAX_WATERMARK.getTimestamp()) + .isEqualTo(watermark.getWatermark()); } else { - assertEquals(watermark.getWatermark(), INIT_TIMESTAMP + index++); + assertThat(INIT_TIMESTAMP + index++).isEqualTo(watermark.getWatermark()); } } } @@ -390,7 +387,7 @@ private void testRecordSerDe(TimeCharacteristic timeCharacteristic) throws Excep case ProcessingTime: // NonTimestampContext, no intermediate watermarks, and one end-of-event-time // watermark - assertEquals(records.size(), numElementsPerProducer + 1); + assertThat(records.size()).isEqualTo(numElementsPerProducer + 1); break; case IngestionTime: // IngestionTime uses AutomaticWatermarkContext and it emits a watermark after every @@ -401,7 +398,7 @@ private void testRecordSerDe(TimeCharacteristic timeCharacteristic) throws Excep // ManualWatermarkContext // `numElementsPerProducer` records, `numElementsPerProducer` watermarks, and one // end-of-event-time watermark - assertEquals(records.size(), numElementsPerProducer * 2 + 1); + assertThat(records.size()).isEqualTo(numElementsPerProducer * 2 + 1); break; default: fail("unknown TimeCharacteristic type"); @@ -415,50 +412,50 @@ private void testRecordSerDe(TimeCharacteristic timeCharacteristic) throws Excep int recordIndex = 0; int watermarkIndex = 0; for (ConsumerRecord consumerRecord : records) { - assertNull(consumerRecord.key()); + assertThat(consumerRecord.key()).isNull(); KafkaShuffleElement element = deserializer.deserialize(consumerRecord); if (element.isRecord()) { KafkaShuffleRecord> record = element.asRecord(); switch (timeCharacteristic) { case ProcessingTime: - assertNull(record.getTimestamp()); + assertThat(record.getTimestamp()).isNull(); break; case IngestionTime: - assertNotNull(record.getTimestamp()); + assertThat(record.getTimestamp()).isNotNull(); break; case EventTime: - assertEquals( - record.getTimestamp().longValue(), - record.getValue().f1.longValue()); + assertThat(record.getValue().f1.longValue()) + .isEqualTo(record.getTimestamp().longValue()); break; default: fail("unknown TimeCharacteristic type"); } - assertEquals(record.getValue().f0.intValue(), recordIndex); - assertEquals(record.getValue().f1.longValue(), INIT_TIMESTAMP + recordIndex); - assertEquals(record.getValue().f2.intValue(), 0); + assertThat(recordIndex).isEqualTo(record.getValue().f0.intValue()); + assertThat(INIT_TIMESTAMP + recordIndex) + .isEqualTo(record.getValue().f1.longValue()); + assertThat(0).isEqualTo(record.getValue().f2.intValue()); recordIndex++; } else if (element.isWatermark()) { KafkaShuffleWatermark watermark = element.asWatermark(); switch (timeCharacteristic) { case ProcessingTime: - assertEquals(watermark.getSubtask(), 0); + assertThat(watermark.getSubtask()).isEqualTo(0); // the last element is the watermark that signifies end-of-event-time - assertEquals(numElementsPerProducer, recordIndex); - assertEquals( - watermark.getWatermark(), Watermark.MAX_WATERMARK.getTimestamp()); + assertThat(recordIndex).isEqualTo(numElementsPerProducer); + assertThat(Watermark.MAX_WATERMARK.getTimestamp()) + 
.isEqualTo(watermark.getWatermark()); break; case IngestionTime: break; case EventTime: - assertEquals(watermark.getSubtask(), 0); + assertThat(watermark.getSubtask()).isEqualTo(0); if (watermarkIndex == recordIndex) { // the last element is the watermark that signifies end-of-event-time - assertEquals( - watermark.getWatermark(), - Watermark.MAX_WATERMARK.getTimestamp()); + assertThat(Watermark.MAX_WATERMARK.getTimestamp()) + .isEqualTo(watermark.getWatermark()); } else { - assertEquals(watermark.getWatermark(), INIT_TIMESTAMP + watermarkIndex); + assertThat(INIT_TIMESTAMP + watermarkIndex) + .isEqualTo(watermark.getWatermark()); } break; default: diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java index 4ea4072fcaf31..e9959087c951a 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaConnectorOptionsUtilTest.java @@ -34,11 +34,8 @@ import static org.apache.flink.table.api.DataTypes.INT; import static org.apache.flink.table.api.DataTypes.ROW; import static org.apache.flink.table.api.DataTypes.STRING; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; -import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test for {@link KafkaConnectorOptionsUtil}. */ public class KafkaConnectorOptionsUtilTest { @@ -58,8 +55,8 @@ public void testFormatProjection() { final Configuration config = Configuration.fromMap(options); - assertArrayEquals(new int[] {3, 1}, createKeyFormatProjection(config, dataType)); - assertArrayEquals(new int[] {0, 2}, createValueFormatProjection(config, dataType)); + assertThat(createKeyFormatProjection(config, dataType)).isEqualTo(new int[] {3, 1}); + assertThat(createValueFormatProjection(config, dataType)).isEqualTo(new int[] {0, 2}); } @Test @@ -69,17 +66,11 @@ public void testMissingKeyFormatProjection() { final Configuration config = Configuration.fromMap(options); - try { - createKeyFormatProjection(config, dataType); - fail(); - } catch (ValidationException e) { - assertThat( - e, - hasMessage( - equalTo( - "A key format 'key.format' requires the declaration of one or more " - + "of key fields using 'key.fields'."))); - } + assertThatThrownBy(() -> createKeyFormatProjection(config, dataType)) + .isInstanceOf(ValidationException.class) + .hasMessage( + "A key format 'key.format' requires the declaration of one or more " + + "of key fields using 'key.fields'."); } @Test @@ -90,20 +81,14 @@ public void testInvalidKeyFormatFieldProjection() { final Configuration config = Configuration.fromMap(options); - try { - createKeyFormatProjection(config, dataType); - fail(); - } catch (ValidationException e) { - assertThat( - e, - hasMessage( - equalTo( - "Could not find the field 'non_existing' in the table schema for " - + "usage in the key format. A key field must be a regular, " - + "physical column. 
The following columns can be selected " - + "in the 'key.fields' option:\n" - + "[id, name]"))); - } + assertThatThrownBy(() -> createKeyFormatProjection(config, dataType)) + .isInstanceOf(ValidationException.class) + .hasMessage( + "Could not find the field 'non_existing' in the table schema for " + + "usage in the key format. A key field must be a regular, " + + "physical column. The following columns can be selected " + + "in the 'key.fields' option:\n" + + "[id, name]"); } @Test @@ -116,17 +101,11 @@ public void testInvalidKeyFormatPrefixProjection() { final Configuration config = Configuration.fromMap(options); - try { - createKeyFormatProjection(config, dataType); - fail(); - } catch (ValidationException e) { - assertThat( - e, - hasMessage( - equalTo( - "All fields in 'key.fields' must be prefixed with 'k_' when option " - + "'key.fields-prefix' is set but field 'part_2' is not prefixed."))); - } + assertThatThrownBy(() -> createKeyFormatProjection(config, dataType)) + .isInstanceOf(ValidationException.class) + .hasMessage( + "All fields in 'key.fields' must be prefixed with 'k_' when option " + + "'key.fields-prefix' is set but field 'part_2' is not prefixed."); } @Test @@ -138,17 +117,11 @@ public void testInvalidValueFormatProjection() { final Configuration config = Configuration.fromMap(options); - try { - createValueFormatProjection(config, dataType); - fail(); - } catch (ValidationException e) { - assertThat( - e, - hasMessage( - equalTo( - "A key prefix is not allowed when option 'value.fields-include' " - + "is set to 'ALL'. Set it to 'EXCEPT_KEY' instead to avoid field overlaps."))); - } + assertThatThrownBy(() -> createValueFormatProjection(config, dataType)) + .isInstanceOf(ValidationException.class) + .hasMessage( + "A key prefix is not allowed when option 'value.fields-include' " + + "is set to 'ALL'. 
Set it to 'EXCEPT_KEY' instead to avoid field overlaps."); } // -------------------------------------------------------------------------------------------- diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java index 4b0a863b5fec7..0b908abfa5f32 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaDynamicTableFactoryTest.java @@ -690,7 +690,7 @@ private void verifyEncoderSubject( } if (avroFormats.contains(keyFormat)) { - assert sink.keyEncodingFormat != null; + assertThat(sink.keyEncodingFormat).isNotNull(); SerializationSchema actualKeyEncoder = sink.keyEncodingFormat.createRuntimeEncoder( new SinkRuntimeProviderContext(false), SCHEMA_DATA_TYPE); diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java index cfd5a46a09e5e..273df6465c84f 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java @@ -61,9 +61,9 @@ import static org.apache.flink.table.utils.TableTestMatchers.deepEqualTo; import static org.apache.flink.util.CollectionUtil.entry; import static org.apache.flink.util.CollectionUtil.map; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.HamcrestCondition.matching; /** Basic IT cases for the Kafka table source and sink. 
*/ @RunWith(Parameterized.class) @@ -171,7 +171,7 @@ public void testKafkaSourceSink() throws Exception { "+I(2019-12-12 00:00:05.000,2019-12-12,00:00:03,2019-12-12 00:00:04.004,3,50.00)", "+I(2019-12-12 00:00:10.000,2019-12-12,00:00:05,2019-12-12 00:00:06.006,2,5.33)"); - assertEquals(expected, TestingSinkFunction.rows); + assertThat(TestingSinkFunction.rows).isEqualTo(expected); // ------------- cleanup ------------------- @@ -262,7 +262,7 @@ public void testKafkaTableWithMultipleTopics() throws Exception { } List expected = Arrays.asList("+I(Dollar)", "+I(Dummy)", "+I(Euro)", "+I(Yen)"); TestingSinkFunction.rows.sort(Comparator.naturalOrder()); - assertEquals(expected, TestingSinkFunction.rows); + assertThat(TestingSinkFunction.rows).isEqualTo(expected); // ------------- cleanup ------------------- topics.forEach(super::deleteTestTopic); @@ -353,7 +353,7 @@ public void testKafkaSourceSinkWithMetadata() throws Exception { topic, true)); - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); // ------------- cleanup ------------------- @@ -434,7 +434,7 @@ public void testKafkaSourceSinkWithKeyAndPartialValue() throws Exception { 43, "payload 3")); - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); // ------------- cleanup ------------------- @@ -512,7 +512,7 @@ public void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { 102L, "payload 3")); - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); // ------------- cleanup ------------------- @@ -627,7 +627,7 @@ public void testKafkaTemporalJoinChangelog() throws Exception { "+I[o_005, 2020-10-01T18:00, p_001, 2020-10-01T18:00, 11.9900, Leonard, scooter, 10, 119.9000]", "+I[o_006, 2020-10-01T18:00, null, null, null, Leonard, null, 10, null]"); - assertEquals(expected, result); + assertThat(result).isEqualTo(expected); // ------------- cleanup ------------------- diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestUtils.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestUtils.java index bfc21d92f2b4e..b0348ff20fcd0 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestUtils.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableTestUtils.java @@ -42,8 +42,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; /** Utils for kafka table tests. 
*/ public class KafkaTableTestUtils { @@ -70,7 +70,7 @@ public static List collectRows(Table table, int expectedSize) throws Except public static List readLines(String resource) throws IOException { final URL url = KafkaChangelogTableITCase.class.getClassLoader().getResource(resource); - assert url != null; + assertThat(url).isNotNull(); Path path = new File(url.getFile()).toPath(); return Files.readAllLines(path); } @@ -99,12 +99,12 @@ public static void comparedWithKeyAndOrder( actualData.computeIfAbsent(key, k -> new LinkedList<>()).add(row); } // compare key first - assertEquals("Actual result: " + actual, expectedData.size(), actualData.size()); + assertThat(actualData).as("Actual result: " + actual).hasSize(expectedData.size()); // compare by value for (Row key : expectedData.keySet()) { - assertThat( - actualData.get(key), - TableTestMatchers.deepEqualTo(expectedData.get(key), false)); + assertThat(actualData.get(key)) + .satisfies( + matching(TableTestMatchers.deepEqualTo(expectedData.get(key), false))); } } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java index 57a5987618331..74de65db0eef4 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/ReducingUpsertWriterTest.java @@ -50,8 +50,7 @@ import static org.apache.flink.types.RowKind.DELETE; import static org.apache.flink.types.RowKind.INSERT; import static org.apache.flink.types.RowKind.UPDATE_AFTER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link ReducingUpsertWriter}. 
*/ @RunWith(Parameterized.class) @@ -155,7 +154,7 @@ public void testWriteData() throws Exception { // write 4 records which doesn't trigger batch size writeData(bufferedWriter, new ReusableIterator(0, 4)); - assertTrue(writer.rowDataCollectors.isEmpty()); + assertThat(writer.rowDataCollectors).isEmpty(); // write one more record, and should flush the buffer writeData(bufferedWriter, new ReusableIterator(7, 1)); @@ -212,7 +211,7 @@ public void testWriteData() throws Exception { writer.rowDataCollectors.clear(); // write remaining data, and they are still buffered writeData(bufferedWriter, new ReusableIterator(4, 3)); - assertTrue(writer.rowDataCollectors.isEmpty()); + assertThat(writer.rowDataCollectors).isEmpty(); } @Test @@ -271,9 +270,9 @@ private void compareCompactedResult( actualMap.computeIfAbsent(id, key -> new ArrayList<>()).add(rowData); } - assertEquals(expected.size(), actualMap.size()); + assertThat(actualMap).hasSize(expected.size()); for (Integer id : expected.keySet()) { - assertEquals(expected.get(id), actualMap.get(id)); + assertThat(actualMap.get(id)).isEqualTo(expected.get(id)); } } @@ -338,9 +337,8 @@ private static class MockedSinkWriter @Override public void write(RowData element, Context context) throws IOException, InterruptedException { - assertEquals( - element.getTimestamp(TIMESTAMP_INDICES, 3).toInstant(), - Instant.ofEpochMilli(context.timestamp())); + assertThat(Instant.ofEpochMilli(context.timestamp())) + .isEqualTo(element.getTimestamp(TIMESTAMP_INDICES, 3).toInstant()); rowDataCollectors.add(element); } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java index 3ac5ef6dbb4f8..79884ad092963 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaDynamicTableFactoryTest.java @@ -79,10 +79,7 @@ import static org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil.AVRO_CONFLUENT; import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink; import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSource; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Test for {@link UpsertKafkaDynamicTableFactory}. */ public class UpsertKafkaDynamicTableFactoryTest extends TestLogger { @@ -161,7 +158,7 @@ public void testTableSource() { null, SOURCE_TOPIC, UPSERT_KAFKA_SOURCE_PROPERTIES); - assertEquals(actualSource, expectedSource); + assertThat(actualSource).isEqualTo(expectedSource); final KafkaDynamicSource actualUpsertKafkaSource = (KafkaDynamicSource) actualSource; ScanTableSource.ScanRuntimeProvider provider = @@ -190,15 +187,15 @@ public void testTableSink() { // Test sink format. final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink; - assertEquals(expectedSink, actualSink); + assertThat(actualSink).isEqualTo(expectedSink); // Test kafka producer. 
DynamicTableSink.SinkRuntimeProvider provider = actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false)); - assertThat(provider, instanceOf(SinkV2Provider.class)); + assertThat(provider).isInstanceOf(SinkV2Provider.class); final SinkV2Provider sinkFunctionProvider = (SinkV2Provider) provider; final Sink sink = sinkFunctionProvider.createSink(); - assertThat(sink, instanceOf(KafkaSink.class)); + assertThat(sink).isInstanceOf(KafkaSink.class); } @SuppressWarnings("rawtypes") @@ -231,12 +228,12 @@ public void testBufferedTableSink() { // Test sink format. final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink; - assertEquals(expectedSink, actualSink); + assertThat(actualSink).isEqualTo(expectedSink); // Test kafka producer. DynamicTableSink.SinkRuntimeProvider provider = actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false)); - assertThat(provider, instanceOf(DataStreamSinkProvider.class)); + assertThat(provider).isInstanceOf(DataStreamSinkProvider.class); final DataStreamSinkProvider sinkProvider = (DataStreamSinkProvider) provider; final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); sinkProvider.consumeDataStream( @@ -250,10 +247,10 @@ public void testBufferedTableSink() { new RuntimeException( "Expected operator with name Sink in stream graph.")) .getOperatorFactory(); - assertThat(sinkOperatorFactory, instanceOf(SinkWriterOperatorFactory.class)); + assertThat(sinkOperatorFactory).isInstanceOf(SinkWriterOperatorFactory.class); org.apache.flink.api.connector.sink2.Sink sink = ((SinkWriterOperatorFactory) sinkOperatorFactory).getSink(); - assertThat(sink, instanceOf(ReducingUpsertSink.class)); + assertThat(sink).isInstanceOf(ReducingUpsertSink.class); } @Test @@ -276,14 +273,14 @@ public void testTableSinkWithParallelism() { DeliveryGuarantee.AT_LEAST_ONCE, SinkBufferFlushMode.DISABLED, 100); - assertEquals(expectedSink, actualSink); + assertThat(actualSink).isEqualTo(expectedSink); final DynamicTableSink.SinkRuntimeProvider provider = actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false)); - assertThat(provider, instanceOf(SinkV2Provider.class)); + assertThat(provider).isInstanceOf(SinkV2Provider.class); final SinkV2Provider sinkProvider = (SinkV2Provider) provider; - assertTrue(sinkProvider.getParallelism().isPresent()); - assertEquals(100, (long) sinkProvider.getParallelism().get()); + assertThat(sinkProvider.getParallelism()).isPresent(); + assertThat((long) sinkProvider.getParallelism().get()).isEqualTo(100); } @Test @@ -369,18 +366,17 @@ private void verifyEncoderSubject( SerializationSchema actualValueEncoder = sink.valueEncodingFormat.createRuntimeEncoder( new SinkRuntimeProviderContext(false), SINK_SCHEMA.toSinkRowDataType()); - assertEquals( - createConfluentAvroSerSchema(rowType, expectedValueSubject), - actualValueEncoder); + assertThat(actualValueEncoder) + .isEqualTo(createConfluentAvroSerSchema(rowType, expectedValueSubject)); } if (AVRO_CONFLUENT.equals(keyFormat)) { - assert sink.keyEncodingFormat != null; + assertThat(sink.keyEncodingFormat).isNotNull(); SerializationSchema actualKeyEncoder = sink.keyEncodingFormat.createRuntimeEncoder( new SinkRuntimeProviderContext(false), SINK_SCHEMA.toSinkRowDataType()); - assertEquals( - createConfluentAvroSerSchema(rowType, expectedKeySubject), actualKeyEncoder); + assertThat(actualKeyEncoder) + .isEqualTo(createConfluentAvroSerSchema(rowType, expectedKeySubject)); } } @@ -648,7 +644,7 @@ private 
static KafkaDynamicSink createExpectedSink( } private void assertKafkaSource(ScanTableSource.ScanRuntimeProvider provider) { - assertThat(provider, instanceOf(DataStreamScanProvider.class)); + assertThat(provider).isInstanceOf(DataStreamScanProvider.class); final DataStreamScanProvider dataStreamScanProvider = (DataStreamScanProvider) provider; final Transformation transformation = dataStreamScanProvider @@ -656,11 +652,11 @@ private void assertKafkaSource(ScanTableSource.ScanRuntimeProvider provider) { n -> Optional.empty(), StreamExecutionEnvironment.createLocalEnvironment()) .getTransformation(); - assertThat(transformation, instanceOf(SourceTransformation.class)); + assertThat(transformation).isInstanceOf(SourceTransformation.class); SourceTransformation sourceTransformation = (SourceTransformation) transformation; - assertThat(sourceTransformation.getSource(), instanceOf(KafkaSource.class)); + assertThat(sourceTransformation.getSource()).isInstanceOf(KafkaSource.class); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java index 9b11e0b2d4a75..109f4402a638c 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/UpsertKafkaTableITCase.java @@ -49,7 +49,8 @@ import static org.apache.flink.streaming.connectors.kafka.table.KafkaTableTestUtils.waitingExpectedResults; import static org.apache.flink.table.planner.factories.TestValuesTableFactory.changelogRow; import static org.apache.flink.table.utils.TableTestMatchers.deepEqualTo; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; /** Upsert-kafka IT cases. 
*/ @RunWith(Parameterized.class) @@ -187,7 +188,7 @@ public void testBufferedUpsertSink() throws Exception { LocalDateTime.parse("2020-03-11T13:12:11.120"), "payload")); - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); // ------------- cleanup ------------------- @@ -285,7 +286,7 @@ public void testSourceSinkWithKeyAndPartialValue() throws Exception { 42, "payload")); - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); // ------------- cleanup ------------------- @@ -380,7 +381,7 @@ public void testKafkaSourceSinkWithKeyAndFullValue() throws Exception { 100L, "payload")); - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); // ------------- cleanup ------------------- @@ -782,7 +783,7 @@ private void writeChangelogToUpsertKafkaWithMetadata(String userTable) throws Ex // we ignore the orders for easier comparing, as we already verified ordering in // testAggregate() - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); } private void temporalJoinUpsertKafka(String userTable) throws Exception { @@ -876,6 +877,6 @@ private void temporalJoinUpsertKafka(String userTable) throws Exception { format, userTable)), 7); - assertThat(result, deepEqualTo(expected, true)); + assertThat(result).satisfies(matching(deepEqualTo(expected, true))); } } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestPartitionDiscoverer.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestPartitionDiscoverer.java index 85a2c814764b8..5731273f89f85 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestPartitionDiscoverer.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/testutils/TestPartitionDiscoverer.java @@ -28,8 +28,7 @@ import java.util.ArrayList; import java.util.List; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Matchers.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -65,18 +64,19 @@ public TestPartitionDiscoverer( @Override protected List getAllTopics() { - assertTrue(topicsDescriptor.isTopicPattern()); + assertThat(topicsDescriptor.isTopicPattern()).isTrue(); return mockGetAllTopicsReturnSequence.get(getAllTopicsInvokeCount++); } @Override protected List getAllPartitionsForTopics(List topics) { if (topicsDescriptor.isFixedTopics()) { - assertEquals(topicsDescriptor.getFixedTopics(), topics); + assertThat(topics).isEqualTo(topicsDescriptor.getFixedTopics()); } else { - assertEquals( - mockGetAllTopicsReturnSequence.get(getAllPartitionsForTopicsInvokeCount - 1), - topics); + assertThat(topics) + .isEqualTo( + mockGetAllTopicsReturnSequence.get( + getAllPartitionsForTopicsInvokeCount - 1)); } return mockGetAllPartitionsForTopicsReturnSequence.get( getAllPartitionsForTopicsInvokeCount++); From 0726781f80201f006792599aa25b1baac50aaa45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 6 May 2022 11:11:00 +0200 Subject: [PATCH 07/16] 
[FLINK-27185][connector] Convert connector-kinesis module to assertj Co-authored-by: slinkydeveloper --- .../FlinkKinesisConsumerMigrationTest.java | 102 ++++++------ .../kinesis/FlinkKinesisConsumerTest.java | 78 ++++----- .../kinesis/FlinkKinesisITCase.java | 13 +- .../kinesis/FlinkKinesisProducerTest.java | 68 ++++---- .../internals/KinesisDataFetcherTest.java | 156 ++++++++---------- .../internals/ShardConsumerFanOutTest.java | 37 +++-- .../kinesis/internals/ShardConsumerTest.java | 15 +- .../internals/ShardConsumerTestUtils.java | 9 +- .../internals/publisher/RecordBatchTest.java | 26 +-- ...anOutRecordPublisherConfigurationTest.java | 28 ++-- .../fanout/FanOutRecordPublisherTest.java | 66 ++++---- .../fanout/StreamConsumerRegistrarTest.java | 19 +-- ...llingRecordPublisherConfigurationTest.java | 16 +- .../PollingRecordPublisherFactoryTest.java | 11 +- .../polling/PollingRecordPublisherTest.java | 18 +- ...ingRecordPublisherMetricsReporterTest.java | 12 +- .../ShardConsumerMetricsReporterTest.java | 13 +- .../model/DynamoDBStreamsShardHandleTest.java | 31 ++-- .../model/SentinelSequenceNumberTest.java | 4 +- .../kinesis/model/StartingPositionTest.java | 33 ++-- .../kinesis/model/StreamShardHandleTest.java | 20 +-- .../kinesis/proxy/KinesisProxyTest.java | 95 +++++------ .../proxy/KinesisProxyV2FactoryTest.java | 12 +- .../kinesis/proxy/KinesisProxyV2Test.java | 26 +-- .../table/KinesisDynamicTableFactoryTest.java | 16 +- .../FakeKinesisFanOutBehavioursFactory.java | 13 +- .../connectors/kinesis/util/AWSUtilTest.java | 45 +++-- .../kinesis/util/AwsV2UtilTest.java | 55 +++--- .../util/JobManagerWatermarkTrackerTest.java | 9 +- .../kinesis/util/KinesisConfigUtilTest.java | 38 +++-- .../kinesis/util/RecordEmitterTest.java | 12 +- .../util/StreamConsumerRegistrarUtilTest.java | 8 +- .../util/UniformShardAssignerTest.java | 8 +- .../kinesis/util/WatermarkTrackerTest.java | 15 +- 34 files changed, 561 insertions(+), 566 deletions(-) diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java index da041effe5aae..0099f48a64895 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerMigrationTest.java @@ -57,10 +57,7 @@ import java.util.Properties; import java.util.concurrent.atomic.AtomicReference; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** * Tests for checking whether {@link FlinkKinesisConsumer} can restore from snapshots that were done @@ -180,7 +177,7 @@ public void testRestoreWithEmptyState() throws Exception { consumerFunction.run(new TestSourceContext<>()); // assert that no state was restored - assertTrue(consumerFunction.getRestoredState().isEmpty()); + assertThat(consumerFunction.getRestoredState()).isEmpty(); // although the restore state is empty, the fetcher should still have been registered the // initial discovered shard; @@ -188,13 +185,13 @@ public void testRestoreWithEmptyState() throws Exception { // job wasn't running, // 
and therefore should be consumed from the earliest sequence number KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0); - assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName()); - assertEquals( - TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId()); - assertFalse(restoredShardState.getStreamShardHandle().isClosed()); - assertEquals( - SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), - restoredShardState.getLastProcessedSequenceNum()); + assertThat(restoredShardState.getStreamShardHandle().getStreamName()) + .isEqualTo(TEST_STREAM_NAME); + assertThat(restoredShardState.getStreamShardHandle().getShard().getShardId()) + .isEqualTo(TEST_SHARD_ID); + assertThat(restoredShardState.getStreamShardHandle().isClosed()).isFalse(); + assertThat(restoredShardState.getLastProcessedSequenceNum()) + .isEqualTo(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get()); consumerOperator.close(); consumerOperator.cancel(); @@ -246,20 +243,22 @@ public void testRestore() throws Exception { consumerFunction.run(new TestSourceContext<>()); // assert that state is correctly restored - assertNotEquals(null, consumerFunction.getRestoredState()); - assertEquals(1, consumerFunction.getRestoredState().size()); - assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState())); - assertEquals(1, fetcher.getSubscribedShardsState().size()); - assertEquals( - TEST_SEQUENCE_NUMBER, - fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum()); + assertThat(consumerFunction.getRestoredState()).isNotNull(); + assertThat(consumerFunction.getRestoredState()).hasSize(1); + assertThat(removeEquivalenceWrappers(consumerFunction.getRestoredState())) + .isEqualTo(TEST_STATE); + assertThat(fetcher.getSubscribedShardsState()).hasSize(1); + assertThat(fetcher.getSubscribedShardsState().get(0).getLastProcessedSequenceNum()) + .isEqualTo(TEST_SEQUENCE_NUMBER); KinesisStreamShardState restoredShardState = fetcher.getSubscribedShardsState().get(0); - assertEquals(TEST_STREAM_NAME, restoredShardState.getStreamShardHandle().getStreamName()); - assertEquals( - TEST_SHARD_ID, restoredShardState.getStreamShardHandle().getShard().getShardId()); - assertFalse(restoredShardState.getStreamShardHandle().isClosed()); - assertEquals(TEST_SEQUENCE_NUMBER, restoredShardState.getLastProcessedSequenceNum()); + assertThat(restoredShardState.getStreamShardHandle().getStreamName()) + .isEqualTo(TEST_STREAM_NAME); + assertThat(restoredShardState.getStreamShardHandle().getShard().getShardId()) + .isEqualTo(TEST_SHARD_ID); + assertThat(restoredShardState.getStreamShardHandle().isClosed()).isFalse(); + assertThat(restoredShardState.getLastProcessedSequenceNum()) + .isEqualTo(TEST_SEQUENCE_NUMBER); consumerOperator.close(); consumerOperator.cancel(); @@ -339,46 +338,43 @@ public void testRestoreWithReshardedStream() throws Exception { consumerFunction.run(new TestSourceContext<>()); // assert that state is correctly restored - assertNotEquals(null, consumerFunction.getRestoredState()); - assertEquals(1, consumerFunction.getRestoredState().size()); - assertEquals(TEST_STATE, removeEquivalenceWrappers(consumerFunction.getRestoredState())); + assertThat(consumerFunction.getRestoredState()).isNotNull(); + assertThat(consumerFunction.getRestoredState()).hasSize(1); + assertThat(removeEquivalenceWrappers(consumerFunction.getRestoredState())) + .isEqualTo(TEST_STATE); // assert that the fetcher is registered with all 
shards, including new shards - assertEquals(3, fetcher.getSubscribedShardsState().size()); + assertThat(fetcher.getSubscribedShardsState()).hasSize(3); KinesisStreamShardState restoredClosedShardState = fetcher.getSubscribedShardsState().get(0); - assertEquals( - TEST_STREAM_NAME, restoredClosedShardState.getStreamShardHandle().getStreamName()); - assertEquals( - TEST_SHARD_ID, - restoredClosedShardState.getStreamShardHandle().getShard().getShardId()); - assertTrue(restoredClosedShardState.getStreamShardHandle().isClosed()); - assertEquals(TEST_SEQUENCE_NUMBER, restoredClosedShardState.getLastProcessedSequenceNum()); + assertThat(restoredClosedShardState.getStreamShardHandle().getStreamName()) + .isEqualTo(TEST_STREAM_NAME); + assertThat(restoredClosedShardState.getStreamShardHandle().getShard().getShardId()) + .isEqualTo(TEST_SHARD_ID); + assertThat(restoredClosedShardState.getStreamShardHandle().isClosed()).isTrue(); + assertThat(restoredClosedShardState.getLastProcessedSequenceNum()) + .isEqualTo(TEST_SEQUENCE_NUMBER); KinesisStreamShardState restoredNewSplitShard1 = fetcher.getSubscribedShardsState().get(1); - assertEquals( - TEST_STREAM_NAME, restoredNewSplitShard1.getStreamShardHandle().getStreamName()); - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder(1), - restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId()); - assertFalse(restoredNewSplitShard1.getStreamShardHandle().isClosed()); + assertThat(restoredNewSplitShard1.getStreamShardHandle().getStreamName()) + .isEqualTo(TEST_STREAM_NAME); + assertThat(restoredNewSplitShard1.getStreamShardHandle().getShard().getShardId()) + .isEqualTo(KinesisShardIdGenerator.generateFromShardOrder(1)); + assertThat(restoredNewSplitShard1.getStreamShardHandle().isClosed()).isFalse(); // new shards should be consumed from the beginning - assertEquals( - SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), - restoredNewSplitShard1.getLastProcessedSequenceNum()); + assertThat(restoredNewSplitShard1.getLastProcessedSequenceNum()) + .isEqualTo(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get()); KinesisStreamShardState restoredNewSplitShard2 = fetcher.getSubscribedShardsState().get(2); - assertEquals( - TEST_STREAM_NAME, restoredNewSplitShard2.getStreamShardHandle().getStreamName()); - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder(2), - restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId()); - assertFalse(restoredNewSplitShard2.getStreamShardHandle().isClosed()); + assertThat(restoredNewSplitShard2.getStreamShardHandle().getStreamName()) + .isEqualTo(TEST_STREAM_NAME); + assertThat(restoredNewSplitShard2.getStreamShardHandle().getShard().getShardId()) + .isEqualTo(KinesisShardIdGenerator.generateFromShardOrder(2)); + assertThat(restoredNewSplitShard2.getStreamShardHandle().isClosed()).isFalse(); // new shards should be consumed from the beginning - assertEquals( - SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(), - restoredNewSplitShard2.getLastProcessedSequenceNum()); + assertThat(restoredNewSplitShard2.getLastProcessedSequenceNum()) + .isEqualTo(SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get()); consumerOperator.close(); consumerOperator.cancel(); diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java index 
d48e04ed4e69a..c817ef1b98e67 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java @@ -64,7 +64,6 @@ import com.amazonaws.services.kinesis.model.HashKeyRange; import com.amazonaws.services.kinesis.model.SequenceNumberRange; import com.amazonaws.services.kinesis.model.Shard; -import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Matchers; @@ -91,11 +90,10 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.notNullValue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -185,12 +183,12 @@ public void testUseRestoredStateForSnapshotIfFetcherNotInitialized() throws Exce // arbitrary checkpoint id and timestamp consumer.snapshotState(new StateSnapshotContextSynchronousImpl(123, 123)); - assertTrue(listState.isClearCalled()); + assertThat(listState.isClearCalled()).isTrue(); // the checkpointed list state should contain only the shards that it should subscribe to - assertEquals(globalUnionState.size() / 2, listState.getList().size()); - assertTrue(listState.getList().contains(globalUnionState.get(0))); - assertTrue(listState.getList().contains(globalUnionState.get(2))); + assertThat(listState.getList()).hasSize(globalUnionState.size() / 2); + assertThat(listState.getList()).contains(globalUnionState.get(0)); + assertThat(listState.getList()).contains(globalUnionState.get(2)); } @Test @@ -295,12 +293,12 @@ public void testListStateChangedAfterSnapshotState() throws Exception { mockedConsumer.snapshotState(mock(FunctionSnapshotContext.class)); - assertEquals(true, listState.clearCalled); - assertEquals(3, listState.getList().size()); + assertThat(listState.clearCalled).isTrue(); + assertThat(listState.getList()).hasSize(3); for (Tuple2 state : initialState) { for (Tuple2 currentState : listState.getList()) { - assertNotEquals(state, currentState); + assertThat(currentState).isNotEqualTo(state); } } @@ -309,7 +307,7 @@ public void testListStateChangedAfterSnapshotState() throws Exception { for (Tuple2 currentState : listState.getList()) { hasOneIsSame = hasOneIsSame || state.equals(currentState); } - assertEquals(true, hasOneIsSame); + assertThat(hasOneIsSame).isTrue(); } } @@ -630,17 +628,16 @@ public void testLegacyKinesisStreamShardToStreamShardMetadataConversion() { .withEndingSequenceNumber(endingSequenceNumber)); KinesisStreamShard kinesisStreamShard = new KinesisStreamShard(streamName, shard); - assertEquals( - streamShardMetadata, - KinesisStreamShard.convertToStreamShardMetadata(kinesisStreamShard)); + assertThat(KinesisStreamShard.convertToStreamShardMetadata(kinesisStreamShard)) + .isEqualTo(streamShardMetadata); } @Test public void testStreamShardMetadataSerializedUsingPojoSerializer() { TypeInformation typeInformation = TypeInformation.of(StreamShardMetadata.class); - assertTrue( - typeInformation.createSerializer(new 
ExecutionConfig()) instanceof PojoSerializer); + assertThat(typeInformation.createSerializer(new ExecutionConfig())) + .isInstanceOf(PojoSerializer.class); } /** @@ -984,9 +981,13 @@ public void markAsTemporarilyIdle() {} sourceFunc.cancel(); testHarness.close(); - assertEquals("record count", recordCount, testHarness.getOutput().size()); - assertThat(watermarks, org.hamcrest.Matchers.contains(new Watermark(-3), new Watermark(5))); - assertEquals("watermark count", watermarkCount, watermarks.size()); + assertThat(testHarness.getOutput()).as("record count").hasSize(recordCount); + assertThat(watermarks) + .satisfies( + matching( + org.hamcrest.Matchers.contains( + new Watermark(-3), new Watermark(5)))); + assertThat(watermarks).as("watermark count").hasSize(watermarkCount); } @Test @@ -1137,8 +1138,9 @@ public void emitWatermark(Watermark mark) { expectedResults.add(new Watermark(-4)); // verify watermark awaitRecordCount(results, expectedResults.size()); - assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray())); - assertEquals(0, TestWatermarkTracker.WATERMARK.get()); + assertThat(results) + .satisfies(matching(org.hamcrest.Matchers.contains(expectedResults.toArray()))); + assertThat(TestWatermarkTracker.WATERMARK.get()).isEqualTo(0); // trigger sync testHarness.setProcessingTime(testHarness.getProcessingTime() + 1); @@ -1155,31 +1157,33 @@ public void emitWatermark(Watermark mark) { while (deadline.hasTimeLeft() && emitterQueue.getSize() < 1) { Thread.sleep(10); } - assertEquals("first record received", 1, emitterQueue.getSize()); + assertThat(emitterQueue.getSize()).as("first record received").isEqualTo(1); // Advance the watermark. Since the new record is past global watermark + threshold, // it won't be emitted and the watermark does not advance testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval); - assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray())); - assertEquals( - 3000L, - (long) org.powermock.reflect.Whitebox.getInternalState(fetcher, "nextWatermark")); + assertThat(results) + .satisfies(matching(org.hamcrest.Matchers.contains(expectedResults.toArray()))); + assertThat((long) org.powermock.reflect.Whitebox.getInternalState(fetcher, "nextWatermark")) + .isEqualTo(3000L); TestWatermarkTracker.assertGlobalWatermark(-4); // Trigger global watermark sync testHarness.setProcessingTime(testHarness.getProcessingTime() + 1); expectedResults.add(Long.toString(record2)); awaitRecordCount(results, expectedResults.size()); - assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray())); + assertThat(results) + .satisfies(matching(org.hamcrest.Matchers.contains(expectedResults.toArray()))); TestWatermarkTracker.assertGlobalWatermark(3000); // Trigger watermark update and emit testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval); expectedResults.add(new Watermark(3000)); - assertThat(results, org.hamcrest.Matchers.contains(expectedResults.toArray())); + assertThat(results) + .satisfies(matching(org.hamcrest.Matchers.contains(expectedResults.toArray()))); // verify exception propagation - Assert.assertNull(sourceThreadError.get()); + assertThat(sourceThreadError.get()).isNull(); throwOnCollect.set(true); shard1.put(Long.toString(record2 + 1)); @@ -1187,8 +1191,8 @@ public void emitWatermark(Watermark mark) { while (deadline.hasTimeLeft() && sourceThreadError.get() == null) { Thread.sleep(10); } - Assert.assertNotNull(sourceThreadError.get()); - 
Assert.assertNotNull("expected", sourceThreadError.get().getMessage()); + assertThat(sourceThreadError.get()).isNotNull(); + assertThat(sourceThreadError.get().getMessage()).as("expected").isNotNull(); sourceFunc.cancel(); testHarness.close(); @@ -1203,8 +1207,7 @@ private void awaitRecordCount(ConcurrentLinkedQueue queue, int int received = queue.size(); if (received < count) { - Assert.fail( - String.format("Timeout waiting for records, received %d/%d", received, count)); + fail(String.format("Timeout waiting for records, received %d/%d", received, count)); } } @@ -1213,7 +1216,8 @@ private static class OpenCheckingStringSchema extends SimpleStringSchema { @Override public void open(DeserializationSchema.InitializationContext context) throws Exception { - assertThat(context.getMetricGroup(), notNullValue(MetricGroup.class)); + assertThat(context.getMetricGroup()) + .satisfies(matching(notNullValue(MetricGroup.class))); this.opened = true; } @@ -1257,7 +1261,7 @@ public long updateWatermark(long localWatermark) { } static void assertGlobalWatermark(long expected) { - Assert.assertEquals(expected, WATERMARK.get()); + assertThat(WATERMARK.get()).isEqualTo(expected); } } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java index 6a394860450db..9f0914e80721e 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java @@ -48,10 +48,10 @@ import java.util.stream.IntStream; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_POSITION; -import static org.hamcrest.Matchers.equalTo; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThan; -import static org.junit.Assert.assertThat; /** IT cases for using Kinesis consumer/producer based on Kinesalite. 
*/ @Ignore("See FLINK-23528") @@ -123,14 +123,13 @@ public void testStopWithSavepoint() throws Exception { List result = stream.executeAndCollect(10000); // stop with savepoint will most likely only return a small subset of the elements // validate that the prefix is as expected - assertThat(result, hasSize(lessThan(numElements))); - assertThat( - result, - equalTo( + assertThat(result).satisfies(matching(hasSize(lessThan(numElements)))); + assertThat(result) + .isEqualTo( IntStream.range(0, numElements) .mapToObj(String::valueOf) .collect(Collectors.toList()) - .subList(0, result.size()))); + .subList(0, result.size())); } finally { stopTask.cancel(true); } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java index 387d2d3329f8d..3f7ad267ad45d 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java @@ -39,7 +39,6 @@ import com.amazonaws.services.kinesis.producer.KinesisProducerConfiguration; import com.amazonaws.services.kinesis.producer.UserRecordResult; import com.google.common.util.concurrent.SettableFuture; -import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -52,10 +51,8 @@ import java.util.List; import java.util.Properties; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; @@ -107,7 +104,7 @@ public void testProducerIsSerializable() { FlinkKinesisProducer producer = new FlinkKinesisProducer<>( new SimpleStringSchema(), TestUtils.getStandardProperties()); - assertTrue(InstantiationUtil.isSerializable(producer)); + assertThat(InstantiationUtil.isSerializable(producer)).isTrue(); } // ---------------------------------------------------------------------- @@ -139,15 +136,14 @@ public void testAsyncErrorRethrownOnInvoke() throws Throwable { testHarness.processElement(new StreamRecord<>("msg-2")); } catch (Exception e) { // the next invoke should rethrow the async exception - Assert.assertTrue( - ExceptionUtils.findThrowableWithMessage(e, "artificial async exception") - .isPresent()); + assertThat(ExceptionUtils.findThrowableWithMessage(e, "artificial async exception")) + .isPresent(); // test succeeded return; } - Assert.fail(); + fail("unknown failure"); } /** @@ -175,15 +171,14 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { testHarness.snapshot(123L, 123L); } catch (Exception e) { // the next checkpoint should rethrow the async exception - Assert.assertTrue( - ExceptionUtils.findThrowableWithMessage(e, "artificial async exception") - .isPresent()); + assertThat(ExceptionUtils.findThrowableWithMessage(e, "artificial async exception")) + .isPresent(); // test succeeded return; } - Assert.fail(); + fail("unknown failure"); } /** @@ -235,16 +230,16 @@ public void go() throws Exception { 
snapshotThread.sync(); } catch (Exception e) { // after the flush, the async exception should have been rethrown - Assert.assertTrue( - ExceptionUtils.findThrowableWithMessage( - e, "artificial async failure for 2nd message") - .isPresent()); + assertThat( + ExceptionUtils.findThrowableWithMessage( + e, "artificial async failure for 2nd message")) + .isPresent(); // test succeeded return; } - Assert.fail(); + fail("unknown failure"); } /** @@ -283,20 +278,23 @@ public void go() throws Exception { // blocked; // this would block forever if the snapshot didn't perform a flush producer.waitUntilFlushStarted(); - Assert.assertTrue( - "Snapshot returned before all records were flushed", snapshotThread.isAlive()); + assertThat(snapshotThread.isAlive()) + .as("Snapshot returned before all records were flushed") + .isTrue(); // now, complete the callbacks UserRecordResult result = mock(UserRecordResult.class); when(result.isSuccessful()).thenReturn(true); producer.getPendingRecordFutures().get(0).set(result); - Assert.assertTrue( - "Snapshot returned before all records were flushed", snapshotThread.isAlive()); + assertThat(snapshotThread.isAlive()) + .as("Snapshot returned before all records were flushed") + .isTrue(); producer.getPendingRecordFutures().get(1).set(result); - Assert.assertTrue( - "Snapshot returned before all records were flushed", snapshotThread.isAlive()); + assertThat(snapshotThread.isAlive()) + .as("Snapshot returned before all records were flushed") + .isTrue(); producer.getPendingRecordFutures().get(2).set(result); @@ -337,7 +335,7 @@ public void go() throws Exception { }; msg1.start(); msg1.trySync(deadline.timeLeftIfAny().toMillis()); - assertFalse("Flush triggered before reaching queue limit", msg1.isAlive()); + assertThat(msg1.isAlive()).as("Flush triggered before reaching queue limit").isFalse(); // consume msg-1 so that queue is empty again producer.getPendingRecordFutures().get(0).set(result); @@ -351,7 +349,7 @@ public void go() throws Exception { }; msg2.start(); msg2.trySync(deadline.timeLeftIfAny().toMillis()); - assertFalse("Flush triggered before reaching queue limit", msg2.isAlive()); + assertThat(msg2.isAlive()).as("Flush triggered before reaching queue limit").isFalse(); CheckedThread moreElementsThread = new CheckedThread() { @@ -365,7 +363,9 @@ public void go() throws Exception { }; moreElementsThread.start(); - assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive()); + assertThat(moreElementsThread.isAlive()) + .as("Producer should still block, but doesn't") + .isTrue(); // consume msg-2 from the queue, leaving msg-3 in the queue and msg-4 blocked while (producer.getPendingRecordFutures().size() < 2) { @@ -373,7 +373,9 @@ public void go() throws Exception { } producer.getPendingRecordFutures().get(1).set(result); - assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive()); + assertThat(moreElementsThread.isAlive()) + .as("Producer should still block, but doesn't") + .isTrue(); // consume msg-3, blocked msg-4 can be inserted into the queue and block is released while (producer.getPendingRecordFutures().size() < 3) { @@ -383,9 +385,9 @@ public void go() throws Exception { moreElementsThread.trySync(deadline.timeLeftIfAny().toMillis()); - assertFalse( - "Prodcuer still blocks although the queue is flushed", - moreElementsThread.isAlive()); + assertThat(moreElementsThread.isAlive()) + .as("Prodcuer still blocks although the queue is flushed") + .isFalse(); 
producer.getPendingRecordFutures().get(3).set(result); @@ -403,7 +405,7 @@ public void testOpen() throws Exception { new AbstractStreamOperatorTestHarness<>(new StreamSink<>(producer), 1, 1, 0); testHarness.open(); - assertThat("Open method was not called", serializationSchema.isOpenCalled(), is(true)); + assertThat(serializationSchema.isOpenCalled()).as("Open method was not called").isTrue(); } // ---------------------------------------------------------------------- diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java index c2d6dde7d96cc..6130d18cb7460 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java @@ -50,7 +50,6 @@ import com.amazonaws.services.kinesis.model.Shard; import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.commons.lang3.mutable.MutableLong; -import org.junit.Assert; import org.junit.Test; import org.powermock.reflect.Whitebox; @@ -73,11 +72,8 @@ import static java.util.Collections.singletonList; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -90,7 +86,7 @@ public void testIsRunning() { KinesisDataFetcher fetcher = createTestDataFetcherWithNoShards(10, 2, "test-stream"); - assertTrue(fetcher.isRunning()); + assertThat(fetcher.isRunning()).isTrue(); } @Test(timeout = 10000) @@ -101,7 +97,7 @@ public void testIsRunningFalseAfterShutDown() throws InterruptedException { fetcher.shutdownFetcher(); fetcher.awaitTermination(); - assertFalse(fetcher.isRunning()); + assertThat(fetcher.isRunning()).isFalse(); } @Test @@ -199,25 +195,24 @@ public void go() throws Exception { consumer.cancel(); consumerThread.sync(); - assertEquals(numShards, testShardStates.size()); + assertThat(testShardStates).hasSize(numShards); for (int i = 0; i < numShards; i++) { fetcher.emitRecordAndUpdateState( "record-" + i, 10L, i, new SequenceNumber("seq-num-1")); - assertEquals( - new SequenceNumber("seq-num-1"), - testShardStates.get(i).getLastProcessedSequenceNum()); - assertEquals( - new StreamRecord<>("record-" + i, 10L), sourceContext.removeLatestOutput()); + assertThat(testShardStates.get(i).getLastProcessedSequenceNum()) + .isEqualTo(new SequenceNumber("seq-num-1")); + assertThat(sourceContext.removeLatestOutput()) + .isEqualTo(new StreamRecord<>("record-" + i, 10L)); } // emitting a null (i.e., a corrupt record) should not produce any output, but still have // the shard state updated fetcher.emitRecordAndUpdateState(null, 10L, 1, new SequenceNumber("seq-num-2")); - assertEquals( - new SequenceNumber("seq-num-2"), - testShardStates.get(1).getLastProcessedSequenceNum()); - 
assertNull(sourceContext.removeLatestOutput()); // no output should have been collected + assertThat(testShardStates.get(1).getLastProcessedSequenceNum()) + .isEqualTo(new SequenceNumber("seq-num-2")); + assertThat(sourceContext.removeLatestOutput()) + .isNull(); // no output should have been collected } @Test @@ -271,16 +266,16 @@ public void go() throws Exception { // assert that the streams tracked in the state are identical to the subscribed streams Set streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet(); - assertEquals(fakeStreams.size(), streamsInState.size()); - assertTrue(streamsInState.containsAll(fakeStreams)); + assertThat(streamsInState).hasSize(fakeStreams.size()); + assertThat(streamsInState.containsAll(fakeStreams)).isTrue(); // assert that the last seen shards in state is correctly set for (Map.Entry streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) { - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder( - streamToShardCount.get(streamToLastSeenShard.getKey()) - 1), - streamToLastSeenShard.getValue()); + assertThat(streamToLastSeenShard.getValue()) + .isEqualTo( + KinesisShardIdGenerator.generateFromShardOrder( + streamToShardCount.get(streamToLastSeenShard.getKey()) - 1)); } } @@ -373,16 +368,16 @@ public void go() throws Exception { // assert that the streams tracked in the state are identical to the subscribed streams Set streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet(); - assertEquals(fakeStreams.size(), streamsInState.size()); - assertTrue(streamsInState.containsAll(fakeStreams)); + assertThat(streamsInState).hasSize(fakeStreams.size()); + assertThat(streamsInState.containsAll(fakeStreams)).isTrue(); // assert that the last seen shards in state is correctly set for (Map.Entry streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) { - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder( - streamToShardCount.get(streamToLastSeenShard.getKey()) - 1), - streamToLastSeenShard.getValue()); + assertThat(streamToLastSeenShard.getValue()) + .isEqualTo( + KinesisShardIdGenerator.generateFromShardOrder( + streamToShardCount.get(streamToLastSeenShard.getKey()) - 1)); } } @@ -480,16 +475,16 @@ public void go() throws Exception { // assert that the streams tracked in the state are identical to the subscribed streams Set streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet(); - assertEquals(fakeStreams.size(), streamsInState.size()); - assertTrue(streamsInState.containsAll(fakeStreams)); + assertThat(streamsInState).hasSize(fakeStreams.size()); + assertThat(streamsInState.containsAll(fakeStreams)).isTrue(); // assert that the last seen shards in state is correctly set for (Map.Entry streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) { - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder( - streamToShardCount.get(streamToLastSeenShard.getKey()) - 1), - streamToLastSeenShard.getValue()); + assertThat(streamToLastSeenShard.getValue()) + .isEqualTo( + KinesisShardIdGenerator.generateFromShardOrder( + streamToShardCount.get(streamToLastSeenShard.getKey()) - 1)); } } @@ -587,18 +582,16 @@ public void go() throws Exception { // assert that the streams tracked in the state are identical to the subscribed streams Set streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet(); - assertEquals(fakeStreams.size(), streamsInState.size()); - assertTrue(streamsInState.containsAll(fakeStreams)); + 
assertThat(streamsInState).hasSize(fakeStreams.size()); + assertThat(streamsInState.containsAll(fakeStreams)).isTrue(); // assert that the last seen shards in state is correctly set - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder(2), - subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream1")); - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder(1), - subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream2")); - assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream3")); - assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream4")); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream1")) + .isEqualTo(KinesisShardIdGenerator.generateFromShardOrder(2)); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream2")) + .isEqualTo(KinesisShardIdGenerator.generateFromShardOrder(1)); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream3")).isNull(); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream4")).isNull(); } @Test @@ -699,18 +692,16 @@ public void go() throws Exception { // assert that the streams tracked in the state are identical to the subscribed streams Set streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet(); - assertEquals(fakeStreams.size(), streamsInState.size()); - assertTrue(streamsInState.containsAll(fakeStreams)); + assertThat(streamsInState).hasSize(fakeStreams.size()); + assertThat(streamsInState.containsAll(fakeStreams)).isTrue(); // assert that the last seen shards in state is correctly set - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder(3), - subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream1")); - assertEquals( - KinesisShardIdGenerator.generateFromShardOrder(4), - subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream2")); - assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream3")); - assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream4")); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream1")) + .isEqualTo(KinesisShardIdGenerator.generateFromShardOrder(3)); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream2")) + .isEqualTo(KinesisShardIdGenerator.generateFromShardOrder(4)); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream3")).isNull(); + assertThat(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream4")).isNull(); } @Test @@ -749,12 +740,10 @@ public void testStreamShardMetadataAndHandleConversion() { .withEndingSequenceNumber(endingSequenceNumber)); StreamShardHandle streamShardHandle = new StreamShardHandle(streamName, shard); - assertEquals( - kinesisStreamShard, - KinesisDataFetcher.convertToStreamShardMetadata(streamShardHandle)); - assertEquals( - streamShardHandle, - KinesisDataFetcher.convertToStreamShardHandle(kinesisStreamShard)); + assertThat(KinesisDataFetcher.convertToStreamShardMetadata(streamShardHandle)) + .isEqualTo(kinesisStreamShard); + assertThat(KinesisDataFetcher.convertToStreamShardHandle(kinesisStreamShard)) + .isEqualTo(streamShardHandle); } private static class DummyFlinkKinesisConsumer extends FlinkKinesisConsumer { @@ -841,9 +830,9 @@ public void testShardToSubtaskMappingWithCustomHashFunction() throws Exception { String msg = String.format("for hash=%d, subtask=%d", hash, subtaskIndex); if (j == i) { - assertEquals(msg, shardCount, shards.size()); + assertThat(shards).as(msg).hasSize(shardCount); } else { - 
assertEquals(msg, 0, shards.size()); + assertThat(shards).as(msg).isEmpty(); } } } @@ -851,12 +840,12 @@ public void testShardToSubtaskMappingWithCustomHashFunction() throws Exception { @Test public void testIsThisSubtaskShouldSubscribeTo() { - assertTrue(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(0, 2, 0)); - assertFalse(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(1, 2, 0)); - assertTrue(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(2, 2, 0)); - assertFalse(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(0, 2, 1)); - assertTrue(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(1, 2, 1)); - assertFalse(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(2, 2, 1)); + assertThat(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(0, 2, 0)).isTrue(); + assertThat(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(1, 2, 0)).isFalse(); + assertThat(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(2, 2, 0)).isTrue(); + assertThat(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(0, 2, 1)).isFalse(); + assertThat(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(1, 2, 1)).isTrue(); + assertThat(KinesisDataFetcher.isThisSubtaskShouldSubscribeTo(2, 2, 1)).isFalse(); } private static BoundedOutOfOrdernessTimestampExtractor watermarkAssigner = @@ -930,34 +919,34 @@ protected long getCurrentTimeMillis() { new StreamRecord<>(String.valueOf(Long.MIN_VALUE), Long.MIN_VALUE); fetcher.emitRecordAndUpdateState( record1.getValue(), record1.getTimestamp(), shardIndex, seq); - Assert.assertEquals(record1, sourceContext.getCollectedOutputs().poll()); + assertThat(sourceContext.getCollectedOutputs().poll()).isEqualTo(record1); fetcher.emitWatermark(); - Assert.assertTrue("potential watermark equals previous watermark", watermarks.isEmpty()); + assertThat(watermarks).as("potential watermark equals previous watermark").isEmpty(); StreamRecord record2 = new StreamRecord<>(String.valueOf(1), 1); fetcher.emitRecordAndUpdateState( record2.getValue(), record2.getTimestamp(), shardIndex, seq); - Assert.assertEquals(record2, sourceContext.getCollectedOutputs().poll()); + assertThat(sourceContext.getCollectedOutputs().poll()).isEqualTo(record2); fetcher.emitWatermark(); - Assert.assertFalse("watermark advanced", watermarks.isEmpty()); - Assert.assertEquals(new Watermark(record2.getTimestamp()), watermarks.remove(0)); - Assert.assertFalse("not idle", isTemporaryIdle.booleanValue()); + assertThat(watermarks).as("watermark advanced").isNotEmpty(); + assertThat(watermarks.remove(0)).isEqualTo(new Watermark(record2.getTimestamp())); + assertThat(isTemporaryIdle.booleanValue()).as("not idle").isFalse(); // test idle timeout long idleTimeout = 10; // advance clock idleTimeout clock.add(idleTimeout + 1); fetcher.emitWatermark(); - Assert.assertFalse("not idle", isTemporaryIdle.booleanValue()); - Assert.assertTrue("not idle, no new watermark", watermarks.isEmpty()); + assertThat(isTemporaryIdle.booleanValue()).as("not idle").isFalse(); + assertThat(watermarks).as("not idle, no new watermark").isEmpty(); // activate idle timeout Whitebox.setInternalState(fetcher, "shardIdleIntervalMillis", idleTimeout); fetcher.emitWatermark(); - Assert.assertTrue("idle", isTemporaryIdle.booleanValue()); - Assert.assertTrue("idle, no watermark", watermarks.isEmpty()); + assertThat(isTemporaryIdle.booleanValue()).as("idle").isTrue(); + assertThat(watermarks).as("idle, no watermark").isEmpty(); } @Test @@ -1030,9 +1019,10 @@ public void go() throws Exception { fail("Expected exception from deserializer, but got: " + e); } - assertTrue( 
- "Expected Fetcher to have been interrupted. This test didn't accomplish its goal.", - fetcher.wasInterrupted); + assertThat(fetcher.wasInterrupted) + .as( + "Expected Fetcher to have been interrupted. This test didn't accomplish its goal.") + .isTrue(); } @Test(timeout = 1000L) diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java index 2d5391797c425..1b614cfd7ed4d 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerFanOutTest.java @@ -41,8 +41,7 @@ import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM; import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM; import static org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils.efoProperties; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AFTER_SEQUENCE_NUMBER; import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AT_SEQUENCE_NUMBER; import static software.amazon.awssdk.services.kinesis.model.ShardIteratorType.AT_TIMESTAMP; @@ -56,7 +55,7 @@ public void testEmptyShard() throws Exception { assertNumberOfMessagesReceivedFromKinesis(0, kinesis, fakeSequenceNumber()); - assertEquals(1, kinesis.getNumberOfSubscribeToShardInvocations()); + assertThat(kinesis.getNumberOfSubscribeToShardInvocations()).isEqualTo(1); } @Test @@ -70,10 +69,10 @@ public void testStartFromLatestIsTranslatedToTimestamp() throws Exception { assertNumberOfMessagesReceivedFromKinesis(10, kinesis, sequenceNumber, efoProperties()); StartingPosition actual = kinesis.getStartingPositionForSubscription(0); - assertEquals(AT_TIMESTAMP, actual.type()); + assertThat(actual.type()).isEqualTo(AT_TIMESTAMP); // Considering milliseconds to make now have the same precision as actual - assertTrue(now.toEpochMilli() <= actual.timestamp().toEpochMilli()); + assertThat(now.toEpochMilli()).isLessThanOrEqualTo(actual.timestamp().toEpochMilli()); } @Test @@ -87,10 +86,10 @@ public void testStartFromLatestReceivesNoRecordsContinuesToUseTimestamp() throws assertNumberOfMessagesReceivedFromKinesis(1, kinesis, sequenceNumber, efoProperties()); // This fake Kinesis will give 2 subscriptions - assertEquals(2, kinesis.getNumberOfSubscribeToShardInvocations()); + assertThat(kinesis.getNumberOfSubscribeToShardInvocations()).isEqualTo(2); - assertEquals(AT_TIMESTAMP, kinesis.getStartingPositionForSubscription(0).type()); - assertEquals(AT_TIMESTAMP, kinesis.getStartingPositionForSubscription(1).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).type()).isEqualTo(AT_TIMESTAMP); + assertThat(kinesis.getStartingPositionForSubscription(1).type()).isEqualTo(AT_TIMESTAMP); } @Test @@ -111,8 +110,8 @@ public void testBoundedShardConsumesFromTimestamp() throws Exception { assertNumberOfMessagesReceivedFromKinesis(10, kinesis, sequenceNumber, consumerConfig); StartingPosition actual = 
kinesis.getStartingPositionForSubscription(0); - assertEquals(AT_TIMESTAMP, actual.type()); - assertEquals(expectedTimestamp, actual.timestamp()); + assertThat(actual.type()).isEqualTo(AT_TIMESTAMP); + assertThat(actual.timestamp()).isEqualTo(expectedTimestamp); } @Test @@ -126,7 +125,7 @@ public void testMillisBehindReported() throws Exception { ShardConsumerMetricsReporter metrics = assertNumberOfMessagesReceivedFromKinesis(10, kinesis, fakeSequenceNumber()); - assertEquals(123L, metrics.getMillisBehindLatest()); + assertThat(metrics.getMillisBehindLatest()).isEqualTo(123L); } @Test @@ -140,7 +139,7 @@ public void testBoundedShardConsumesCorrectNumberOfMessages() throws Exception { // 10 batches of 5 records = 50 assertNumberOfMessagesReceivedFromKinesis(50, kinesis, fakeSequenceNumber()); - assertEquals(1, kinesis.getNumberOfSubscribeToShardInvocations()); + assertThat(kinesis.getNumberOfSubscribeToShardInvocations()).isEqualTo(1); } @Test @@ -156,11 +155,12 @@ public void testBoundedShardResubscribesToShard() throws Exception { assertNumberOfMessagesReceivedFromKinesis(1000, kinesis, fakeSequenceNumber()); // 100 batches / 5 batches per subscription = 20 subscriptions - assertEquals(20, kinesis.getNumberOfSubscribeToShardInvocations()); + assertThat(kinesis.getNumberOfSubscribeToShardInvocations()).isEqualTo(20); // Starting from non-aggregated sequence number means we should start AFTER the sequence // number - assertEquals(AFTER_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).type()) + .isEqualTo(AFTER_SEQUENCE_NUMBER); } @Test @@ -193,7 +193,8 @@ public void testBoundedShardResumingConsumptionFromAggregatedSubsequenceNumber() assertNumberOfMessagesReceivedFromKinesis(94, kinesis, subsequenceNumber); // Starting from aggregated sequence number means we should start AT the sequence number - assertEquals(AT_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).type()) + .isEqualTo(AT_SEQUENCE_NUMBER); } @Test @@ -209,7 +210,7 @@ public void testSubscribeToShardUsesCorrectStartingSequenceNumbers() throws Exce assertNumberOfMessagesReceivedFromKinesis(10, kinesis, new SequenceNumber("0")); // 10 batches / 2 batches per subscription = 5 subscriptions - assertEquals(5, kinesis.getNumberOfSubscribeToShardInvocations()); + assertThat(kinesis.getNumberOfSubscribeToShardInvocations()).isEqualTo(5); // Starting positions should correlate to the last consumed sequence number assertStartingPositionAfterSequenceNumber( @@ -271,8 +272,8 @@ public void testShardConsumerRetriesGenericSdkError() throws Exception { private void assertStartingPositionAfterSequenceNumber( final StartingPosition startingPosition, final String sequenceNumber) { - assertEquals(AFTER_SEQUENCE_NUMBER, startingPosition.type()); - assertEquals(sequenceNumber, startingPosition.sequenceNumber()); + assertThat(startingPosition.type()).isEqualTo(AFTER_SEQUENCE_NUMBER); + assertThat(startingPosition.sequenceNumber()).isEqualTo(sequenceNumber); } private ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKinesis( diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java index 22b5d03be8789..0d80f21995451 100644 --- 
a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTest.java @@ -38,8 +38,7 @@ import static org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumerTestUtils.fakeSequenceNumber; import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM; import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.eq; @@ -58,7 +57,7 @@ public void testMetricsReporting() throws Exception { ShardConsumerMetricsReporter metrics = assertNumberOfMessagesReceivedFromKinesis(500, kinesis, fakeSequenceNumber()); - assertEquals(500, metrics.getMillisBehindLatest()); + assertThat(metrics.getMillisBehindLatest()).isEqualTo(500); } @Test @@ -69,7 +68,7 @@ public void testConsumerAndProducerMetricsAreUnregisteredAfterShardCompletes() 500, 5, 500); AbstractMetricGroup metricGroup = createFakeShardConsumerMetricGroup(); assertNumberOfMessagesReceivedFromKinesis(500, kinesis, fakeSequenceNumber(), metricGroup); - assertTrue(metricGroup.isClosed()); + assertThat(metricGroup.isClosed()).isTrue(); } @Test @@ -181,8 +180,8 @@ public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithAggregatedRecords // 10 * 3 * 5 = 150 ShardConsumerMetricsReporter metrics = assertNumberOfMessagesReceivedFromKinesis(150, kinesis, fakeSequenceNumber()); - assertEquals(3, metrics.getNumberOfAggregatedRecords()); - assertEquals(15, metrics.getNumberOfDeaggregatedRecords()); + assertThat(metrics.getNumberOfAggregatedRecords()).isEqualTo(3); + assertThat(metrics.getNumberOfDeaggregatedRecords()).isEqualTo(15); verify(kinesis) .getShardIterator(any(), eq("AFTER_SEQUENCE_NUMBER"), eq("fakeStartingState")); @@ -202,8 +201,8 @@ public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithAggregatedRecords // 5 * 1 * 10 - 6 = 44 ShardConsumerMetricsReporter metrics = assertNumberOfMessagesReceivedFromKinesis(44, kinesis, sequenceNumber); - assertEquals(1, metrics.getNumberOfAggregatedRecords()); - assertEquals(10, metrics.getNumberOfDeaggregatedRecords()); + assertThat(metrics.getNumberOfAggregatedRecords()).isEqualTo(1); + assertThat(metrics.getNumberOfDeaggregatedRecords()).isEqualTo(10); verify(kinesis).getShardIterator(any(), eq("AT_SEQUENCE_NUMBER"), eq("0")); } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java index f3e45ff3903fe..a7b08a4dc8788 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/ShardConsumerTestUtils.java @@ -49,7 +49,7 @@ import java.util.concurrent.atomic.AtomicReference; import static 
org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for the {@link ShardConsumer}. */ public class ShardConsumerTestUtils { @@ -150,10 +150,9 @@ public static ShardConsumerMetricsReporter assertNumberOfMessagesReceivedFromKin deserializationSchema) .run(); - assertEquals(expectedNumberOfMessages, sourceContext.getCollectedOutputs().size()); - assertEquals( - expectedLastProcessedSequenceNum, - subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum()); + assertThat(sourceContext.getCollectedOutputs()).hasSize(expectedNumberOfMessages); + assertThat(subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum()) + .isEqualTo(expectedLastProcessedSequenceNum); return shardMetricsReporter; } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatchTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatchTest.java index b62082f18c1c9..e072fcaa70034 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatchTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/RecordBatchTest.java @@ -32,7 +32,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils.createDummyStreamShardHandle; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link RecordBatch}. 
*/ public class RecordBatchTest { @@ -45,10 +45,10 @@ public void testDeaggregateRecordsPassThrough() { createDummyStreamShardHandle(), 100L); - assertEquals(4, result.getAggregatedRecordSize()); - assertEquals(4, result.getDeaggregatedRecordSize()); - assertEquals(128, result.getTotalSizeInBytes()); - assertEquals(32, result.getAverageRecordSizeBytes()); + assertThat(result.getAggregatedRecordSize()).isEqualTo(4); + assertThat(result.getDeaggregatedRecordSize()).isEqualTo(4); + assertThat(result.getTotalSizeInBytes()).isEqualTo(128); + assertThat(result.getAverageRecordSizeBytes()).isEqualTo(32); } @Test @@ -57,19 +57,19 @@ public void testDeaggregateRecordsWithAggregatedRecords() { TestUtils.createAggregatedRecordBatch(5, 5, new AtomicInteger()); RecordBatch result = new RecordBatch(records, createDummyStreamShardHandle(), 100L); - assertEquals(5, result.getAggregatedRecordSize()); - assertEquals(25, result.getDeaggregatedRecordSize()); - assertEquals(25 * 1024, result.getTotalSizeInBytes()); - assertEquals(1024, result.getAverageRecordSizeBytes()); + assertThat(result.getAggregatedRecordSize()).isEqualTo(5); + assertThat(result.getDeaggregatedRecordSize()).isEqualTo(25); + assertThat(result.getTotalSizeInBytes()).isEqualTo(25 * 1024); + assertThat(result.getAverageRecordSizeBytes()).isEqualTo(1024); } @Test public void testGetAverageRecordSizeBytesEmptyList() { RecordBatch result = new RecordBatch(emptyList(), createDummyStreamShardHandle(), 100L); - assertEquals(0, result.getAggregatedRecordSize()); - assertEquals(0, result.getDeaggregatedRecordSize()); - assertEquals(0, result.getAverageRecordSizeBytes()); + assertThat(result.getAggregatedRecordSize()).isEqualTo(0); + assertThat(result.getDeaggregatedRecordSize()).isEqualTo(0); + assertThat(result.getAverageRecordSizeBytes()).isEqualTo(0); } @Test @@ -77,7 +77,7 @@ public void testGetMillisBehindLatest() { RecordBatch result = new RecordBatch(singletonList(record("1")), createDummyStreamShardHandle(), 100L); - assertEquals(Long.valueOf(100), result.getMillisBehindLatest()); + assertThat(result.getMillisBehindLatest()).isEqualTo(Long.valueOf(100)); } private Record record(final String sequenceNumber) { diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java index a558ffbdba511..4e9ad04eb3ded 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherConfigurationTest.java @@ -44,7 +44,7 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.REGISTER_STREAM_TIMEOUT_SECONDS; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_TIMEOUT_SECONDS; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link FanOutRecordPublisherConfiguration}. 
*/ public class FanOutRecordPublisherConfigurationTest extends TestLogger { @@ -70,9 +70,8 @@ public void testEagerStrategyWithConsumerName() { testConfig.setProperty(EFO_CONSUMER_NAME, fakedConsumerName); FanOutRecordPublisherConfiguration fanOutRecordPublisherConfiguration = new FanOutRecordPublisherConfiguration(testConfig, new ArrayList<>()); - assertEquals( - fanOutRecordPublisherConfiguration.getConsumerName(), - Optional.of(fakedConsumerName)); + assertThat(Optional.of(fakedConsumerName)) + .isEqualTo(fanOutRecordPublisherConfiguration.getConsumerName()); } @Test @@ -101,9 +100,8 @@ public void testNoneStrategyWithStreams() { expectedStreamArns.put("fakedstream1", "fakedstream1"); expectedStreamArns.put("fakedstream2", "fakedstream2"); - assertEquals( - fanOutRecordPublisherConfiguration.getStreamConsumerArn("fakedstream1"), - Optional.of("fakedstream1")); + assertThat(Optional.of("fakedstream1")) + .isEqualTo(fanOutRecordPublisherConfiguration.getStreamConsumerArn("fakedstream1")); } @Test @@ -149,8 +147,10 @@ public void testParseRegisterStreamConsumerTimeout() { FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(testConfig, Collections.emptyList()); - assertEquals(Duration.ofSeconds(120), configuration.getRegisterStreamConsumerTimeout()); - assertEquals(Duration.ofSeconds(60), configuration.getDeregisterStreamConsumerTimeout()); + assertThat(configuration.getRegisterStreamConsumerTimeout()) + .isEqualTo(Duration.ofSeconds(120)); + assertThat(configuration.getDeregisterStreamConsumerTimeout()) + .isEqualTo(Duration.ofSeconds(60)); } @Test @@ -163,8 +163,10 @@ public void testParseDeregisterStreamConsumerTimeout() { FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(testConfig, Collections.emptyList()); - assertEquals(Duration.ofSeconds(60), configuration.getRegisterStreamConsumerTimeout()); - assertEquals(Duration.ofSeconds(240), configuration.getDeregisterStreamConsumerTimeout()); + assertThat(configuration.getRegisterStreamConsumerTimeout()) + .isEqualTo(Duration.ofSeconds(60)); + assertThat(configuration.getDeregisterStreamConsumerTimeout()) + .isEqualTo(Duration.ofSeconds(240)); } @Test @@ -177,7 +179,7 @@ public void testParseSubscribeToShardTimeout() { FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(testConfig, Collections.emptyList()); - assertEquals(Duration.ofSeconds(123), configuration.getSubscribeToShardTimeout()); + assertThat(configuration.getSubscribeToShardTimeout()).isEqualTo(Duration.ofSeconds(123)); } @Test @@ -189,6 +191,6 @@ public void testDefaultSubscribeToShardTimeout() { FanOutRecordPublisherConfiguration configuration = new FanOutRecordPublisherConfiguration(testConfig, Collections.emptyList()); - assertEquals(Duration.ofSeconds(60), configuration.getSubscribeToShardTimeout()); + assertThat(configuration.getSubscribeToShardTimeout()).isEqualTo(Duration.ofSeconds(60)); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java index 7ce16d36e25ca..548ef7373193b 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java +++ 
b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/FanOutRecordPublisherTest.java @@ -32,7 +32,6 @@ import com.amazonaws.http.timers.client.SdkInterruptedException; import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord; import io.netty.handler.timeout.ReadTimeoutException; -import org.hamcrest.Matchers; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -64,10 +63,7 @@ import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.emptyShard; import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.singletonShard; import static org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils.createDummyStreamShardHandle; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.anyDouble; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; @@ -109,9 +105,10 @@ public void testToSdkV2StartingPositionAfterSequenceNumber() throws Exception { kinesis, StartingPosition.continueFromSequenceNumber(SEQUENCE_NUMBER)); publisher.run(new TestConsumer()); - assertEquals( - DUMMY_SEQUENCE, kinesis.getStartingPositionForSubscription(0).sequenceNumber()); - assertEquals(AFTER_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).sequenceNumber()) + .isEqualTo(DUMMY_SEQUENCE); + assertThat(kinesis.getStartingPositionForSubscription(0).type()) + .isEqualTo(AFTER_SEQUENCE_NUMBER); } @Test @@ -124,9 +121,10 @@ public void testToSdkV2StartingPositionAtSequenceNumber() throws Exception { StartingPosition.restartFromSequenceNumber(AGGREGATED_SEQUENCE_NUMBER)); publisher.run(new TestConsumer()); - assertEquals( - DUMMY_SEQUENCE, kinesis.getStartingPositionForSubscription(0).sequenceNumber()); - assertEquals(AT_SEQUENCE_NUMBER, kinesis.getStartingPositionForSubscription(0).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).sequenceNumber()) + .isEqualTo(DUMMY_SEQUENCE); + assertThat(kinesis.getStartingPositionForSubscription(0).type()) + .isEqualTo(AT_SEQUENCE_NUMBER); } @Test @@ -136,8 +134,8 @@ public void testToSdkV2StartingPositionLatest() throws Exception { RecordPublisher publisher = createRecordPublisher(kinesis, latest()); publisher.run(new TestConsumer()); - assertNull(kinesis.getStartingPositionForSubscription(0).sequenceNumber()); - assertEquals(LATEST, kinesis.getStartingPositionForSubscription(0).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).sequenceNumber()).isNull(); + assertThat(kinesis.getStartingPositionForSubscription(0).type()).isEqualTo(LATEST); } @Test @@ -151,8 +149,8 @@ public void testToSdkV2StartingPositionTrimHorizon() throws Exception { SENTINEL_EARLIEST_SEQUENCE_NUM.get())); publisher.run(new TestConsumer()); - assertNull(kinesis.getStartingPositionForSubscription(0).sequenceNumber()); - assertEquals(TRIM_HORIZON, kinesis.getStartingPositionForSubscription(0).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).sequenceNumber()).isNull(); + assertThat(kinesis.getStartingPositionForSubscription(0).type()).isEqualTo(TRIM_HORIZON); } @Test @@ -164,8 +162,9 @@ public 
void testToSdkV2StartingPositionAtTimeStamp() throws Exception { createRecordPublisher(kinesis, StartingPosition.fromTimestamp(now)); publisher.run(new TestConsumer()); - assertEquals(now.toInstant(), kinesis.getStartingPositionForSubscription(0).timestamp()); - assertEquals(AT_TIMESTAMP, kinesis.getStartingPositionForSubscription(0).type()); + assertThat(kinesis.getStartingPositionForSubscription(0).timestamp()) + .isEqualTo(now.toInstant()); + assertThat(kinesis.getStartingPositionForSubscription(0).type()).isEqualTo(AT_TIMESTAMP); } @Test @@ -188,11 +187,11 @@ public void testToSdkV1Records() throws Exception { publisher.run(consumer); UserRecord actual = consumer.getRecordBatches().get(0).getDeaggregatedRecords().get(0); - assertFalse(actual.isAggregated()); - assertEquals(now, actual.getApproximateArrivalTimestamp()); - assertEquals("sn", actual.getSequenceNumber()); - assertEquals("pk", actual.getPartitionKey()); - assertThat(toByteArray(actual.getData()), Matchers.equalTo(data)); + assertThat(actual.isAggregated()).isFalse(); + assertThat(actual.getApproximateArrivalTimestamp()).isEqualTo(now); + assertThat(actual.getSequenceNumber()).isEqualTo("sn"); + assertThat(actual.getPartitionKey()).isEqualTo("pk"); + assertThat(toByteArray(actual.getData())).isEqualTo(data); } @Test @@ -231,10 +230,10 @@ public void testShardConsumerCompletesIfResourceNotFoundExceptionThrownFromSubsc RecordPublisher recordPublisher = createRecordPublisher(kinesis); TestConsumer consumer = new TestConsumer(); - assertEquals(COMPLETE, recordPublisher.run(consumer)); + assertThat(recordPublisher.run(consumer)).isEqualTo(COMPLETE); // Will exit on the first subscription - assertEquals(1, kinesis.getNumberOfSubscribeToShardInvocations()); + assertThat(kinesis.getNumberOfSubscribeToShardInvocations()).isEqualTo(1); } @Test @@ -250,11 +249,11 @@ public void testShardConsumerRetriesIfLimitExceededExceptionThrownFromSubscripti // An exception is thrown after the 5th record in each subscription, therefore we expect to // receive 5 records - assertEquals(5, consumer.getRecordBatches().size()); - assertEquals(1, kinesis.getNumberOfSubscribeToShardInvocations()); + assertThat(consumer.getRecordBatches()).hasSize(5); + assertThat(kinesis.getNumberOfSubscribeToShardInvocations()).isEqualTo(1); // INCOMPLETE is returned to indicate the shard is not complete - assertEquals(INCOMPLETE, result); + assertThat(result).isEqualTo(INCOMPLETE); } @Test @@ -443,11 +442,12 @@ public void testRecordDurability() throws Exception { List userRecords = flattenToUserRecords(consumer.getRecordBatches()); // Should have received 10 * 12 = 120 records - assertEquals(120, userRecords.size()); + assertThat(userRecords).hasSize(120); int expectedSequenceNumber = 1; for (UserRecord record : userRecords) { - assertEquals(String.valueOf(expectedSequenceNumber++), record.getSequenceNumber()); + assertThat(record.getSequenceNumber()) + .isEqualTo(String.valueOf(expectedSequenceNumber++)); } } @@ -473,13 +473,13 @@ public void testAggregatedRecordDurability() throws Exception { List userRecords = flattenToUserRecords(consumer.getRecordBatches()); // Should have received 10 * 12 * 5 = 600 records - assertEquals(600, userRecords.size()); + assertThat(userRecords).hasSize(600); int sequence = 1; long subsequence = 0; for (UserRecord userRecord : userRecords) { - assertEquals(String.valueOf(sequence), userRecord.getSequenceNumber()); - assertEquals(subsequence++, userRecord.getSubSequenceNumber()); + 
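
The record-count checks in the hunks above replace assertEquals(n, collection.size()) with AssertJ's hasSize(n), which prints the offending collection when it fails. A small, self-contained illustration with made-up values; the list contents are placeholders, not connector records.

import java.util.Arrays;
import java.util.List;

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;

public class CollectionAssertionExample {

    @Test
    public void sizeAndContentChecks() {
        List<String> sequenceNumbers = Arrays.asList("1", "2", "3"); // stand-in for deaggregated records

        // Replaces assertEquals(3, sequenceNumbers.size()); failure output includes the list contents.
        assertThat(sequenceNumbers).hasSize(3);
        // Order-sensitive content check in a single call.
        assertThat(sequenceNumbers).containsExactly("1", "2", "3");
    }
}
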
assertThat(userRecord.getSequenceNumber()).isEqualTo(String.valueOf(sequence)); + assertThat(userRecord.getSubSequenceNumber()).isEqualTo(subsequence++); if (subsequence == 5) { sequence++; @@ -499,7 +499,7 @@ public void testInterruptedPublisherReturnsCancelled() throws Exception { kinesis, StartingPosition.continueFromSequenceNumber(SEQUENCE_NUMBER)); RecordPublisherRunResult actual = publisher.run(new TestConsumer()); - assertEquals(CANCELLED, actual); + assertThat(actual).isEqualTo(CANCELLED); } private List flattenToUserRecords(final List recordBatch) { diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java index ea89c88747de4..b1312fb62a82a 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/fanout/StreamConsumerRegistrarTest.java @@ -49,7 +49,7 @@ import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.STREAM_CONSUMER_ARN_EXISTING; import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.STREAM_CONSUMER_ARN_NEW; import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisFanOutBehavioursFactory.StreamConsumerFakeKinesis.NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.anyDouble; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; @@ -96,7 +96,7 @@ public void testRegisterStreamConsumerRegistersNewStreamConsumer() throws Except String result = registrar.registerStreamConsumer(STREAM, "name"); - assertEquals(STREAM_CONSUMER_ARN_NEW, result); + assertThat(result).isEqualTo(STREAM_CONSUMER_ARN_NEW); } @Test @@ -110,7 +110,7 @@ public void testRegisterStreamConsumerThatAlreadyExistsAndActive() throws Except String result = registrar.registerStreamConsumer(STREAM, "name"); verify(backoff, never()).sleep(anyLong()); - assertEquals(STREAM_CONSUMER_ARN_EXISTING, result); + assertThat(result).isEqualTo(STREAM_CONSUMER_ARN_EXISTING); } @Test @@ -125,12 +125,11 @@ public void testRegisterStreamConsumerWaitsForConsumerToBecomeActive() throws Ex // we backoff on each retry verify(backoff, times(NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE - 1)).sleep(anyLong()); - assertEquals(STREAM_CONSUMER_ARN_EXISTING, result); + assertThat(result).isEqualTo(STREAM_CONSUMER_ARN_EXISTING); // We will invoke describe stream until the stream consumer is activated - assertEquals( - NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE, - kinesis.getNumberOfDescribeStreamConsumerInvocations()); + assertThat(kinesis.getNumberOfDescribeStreamConsumerInvocations()) + .isEqualTo(NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE); for (int i = 1; i < NUMBER_OF_DESCRIBE_REQUESTS_TO_ACTIVATE; i++) { verify(backoff).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(i)); @@ -176,7 +175,7 @@ public void testRegistrationBackoffForLazy() throws Exception { String result = registrar.registerStreamConsumer(STREAM, 
"name"); verify(backoff).sleep(anyLong()); - assertEquals(STREAM_CONSUMER_ARN_EXISTING, result); + assertThat(result).isEqualTo(STREAM_CONSUMER_ARN_EXISTING); } @Test @@ -190,7 +189,7 @@ public void testDeregisterStreamConsumerAndWaitForDeletingStatus() throws Except registrar.deregisterStreamConsumer(STREAM); // We will invoke describe stream until the stream consumer is in the DELETING state - assertEquals(2, kinesis.getNumberOfDescribeStreamConsumerInvocations()); + assertThat(kinesis.getNumberOfDescribeStreamConsumerInvocations()).isEqualTo(2); for (int i = 1; i < 2; i++) { verify(backoff).calculateFullJitterBackoff(anyLong(), anyLong(), anyDouble(), eq(i)); @@ -228,7 +227,7 @@ public void testDeregisterStreamConsumerNotFound() throws Exception { registrar.deregisterStreamConsumer(STREAM); - assertEquals(1, kinesis.getNumberOfDescribeStreamConsumerInvocations()); + assertThat(kinesis.getNumberOfDescribeStreamConsumerInvocations()).isEqualTo(1); } @Test diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfigurationTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfigurationTest.java index 35f82c24afa2a..58e29de735191 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfigurationTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherConfigurationTest.java @@ -24,9 +24,7 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SHARD_GETRECORDS_INTERVAL_MILLIS; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SHARD_GETRECORDS_MAX; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link PollingRecordPublisherConfiguration}. 
*/ public class PollingRecordPublisherConfigurationTest { @@ -35,9 +33,9 @@ public class PollingRecordPublisherConfigurationTest { public void testDefaults() { PollingRecordPublisherConfiguration configuration = new PollingRecordPublisherConfiguration(new Properties()); - assertEquals(configuration.getFetchIntervalMillis(), 200); - assertEquals(configuration.getMaxNumberOfRecordsPerFetch(), 10000); - assertFalse(configuration.isAdaptiveReads()); + assertThat(configuration.getFetchIntervalMillis()).isEqualTo(200); + assertThat(configuration.getMaxNumberOfRecordsPerFetch()).isEqualTo(10000); + assertThat(configuration.isAdaptiveReads()).isFalse(); } @Test @@ -46,7 +44,7 @@ public void testGetFetchIntervalMillis() { PollingRecordPublisherConfiguration configuration = new PollingRecordPublisherConfiguration(properties); - assertEquals(configuration.getFetchIntervalMillis(), 1); + assertThat(configuration.getFetchIntervalMillis()).isEqualTo(1); } @Test @@ -55,7 +53,7 @@ public void testGetMaxNumberOfRecordsPerFetch() { PollingRecordPublisherConfiguration configuration = new PollingRecordPublisherConfiguration(properties); - assertEquals(configuration.getMaxNumberOfRecordsPerFetch(), 2); + assertThat(configuration.getMaxNumberOfRecordsPerFetch()).isEqualTo(2); } @Test @@ -64,7 +62,7 @@ public void testIsAdaptiveReads() { PollingRecordPublisherConfiguration configuration = new PollingRecordPublisherConfiguration(properties); - assertTrue(configuration.isAdaptiveReads()); + assertThat(configuration.isAdaptiveReads()).isTrue(); } private Properties properties(final String key, final String value) { diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactoryTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactoryTest.java index 0bf56886dec89..373555e330aa7 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactoryTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherFactoryTest.java @@ -29,8 +29,7 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SHARD_USE_ADAPTIVE_READS; import static org.apache.flink.streaming.connectors.kinesis.internals.ShardConsumerTestUtils.createFakeShardConsumerMetricGroup; import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; /** Tests for {@link PollingRecordPublisherFactory}. 
*/ @@ -49,8 +48,8 @@ public void testBuildPollingRecordPublisher() throws Exception { createFakeShardConsumerMetricGroup(), mock(StreamShardHandle.class)); - assertTrue(recordPublisher instanceof PollingRecordPublisher); - assertFalse(recordPublisher instanceof AdaptivePollingRecordPublisher); + assertThat(recordPublisher).isInstanceOf(PollingRecordPublisher.class); + assertThat(recordPublisher).isNotInstanceOf(AdaptivePollingRecordPublisher.class); } @Test @@ -66,7 +65,7 @@ public void testBuildAdaptivePollingRecordPublisher() throws Exception { createFakeShardConsumerMetricGroup(), mock(StreamShardHandle.class)); - assertTrue(recordPublisher instanceof PollingRecordPublisher); - assertTrue(recordPublisher instanceof AdaptivePollingRecordPublisher); + assertThat(recordPublisher).isInstanceOf(PollingRecordPublisher.class); + assertThat(recordPublisher).isInstanceOf(AdaptivePollingRecordPublisher.class); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java index 2399ec8474796..469c00b8b2134 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/publisher/polling/PollingRecordPublisherTest.java @@ -33,7 +33,7 @@ import static org.apache.flink.streaming.connectors.kinesis.internals.publisher.RecordPublisher.RecordPublisherRunResult.INCOMPLETE; import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM; import static org.apache.flink.streaming.connectors.kinesis.testutils.FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.AdditionalMatchers.geq; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -56,9 +56,9 @@ public void testRunPublishesRecordsToConsumer() throws Exception { TestConsumer consumer = new TestConsumer(); recordPublisher.run(consumer); - assertEquals(1, consumer.getRecordBatches().size()); - assertEquals(5, consumer.getRecordBatches().get(0).getDeaggregatedRecordSize()); - assertEquals(100L, consumer.getRecordBatches().get(0).getMillisBehindLatest(), 0); + assertThat(consumer.getRecordBatches()).hasSize(1); + assertThat(consumer.getRecordBatches().get(0).getDeaggregatedRecordSize()).isEqualTo(5); + assertThat(consumer.getRecordBatches().get(0).getMillisBehindLatest()).isEqualTo(100L); } @Test @@ -85,10 +85,10 @@ public void testRunReturnsCompleteWhenShardExpires() throws Exception { PollingRecordPublisher recordPublisher = createPollingRecordPublisher(fakeKinesis); // First call results in INCOMPLETE, there is one batch left - assertEquals(INCOMPLETE, recordPublisher.run(new TestConsumer())); + assertThat(recordPublisher.run(new TestConsumer())).isEqualTo(INCOMPLETE); // After second call the shard is complete - assertEquals(COMPLETE, recordPublisher.run(new TestConsumer())); + assertThat(recordPublisher.run(new TestConsumer())).isEqualTo(COMPLETE); } @Test @@ -96,8 +96,8 @@ public void 
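
The publisher-factory tests above swap assertTrue(x instanceof Y) and assertFalse(x instanceof Y) for isInstanceOf and isNotInstanceOf, which report the actual runtime type on failure. A minimal sketch of the same pattern; the types here are plain java.lang examples, not the connector's publisher classes.

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;

public class InstanceOfAssertionExample {

    @Test
    public void typeChecks() {
        Object value = "a plain string"; // stand-in for the created record publisher

        // Replaces assertTrue(value instanceof CharSequence) and assertFalse(value instanceof Integer).
        assertThat(value).isInstanceOf(CharSequence.class);
        assertThat(value).isNotInstanceOf(Integer.class);
    }
}
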
testRunOnCompletelyConsumedShardReturnsComplete() throws Exception { KinesisProxyInterface fakeKinesis = totalNumOfRecordsAfterNumOfGetRecordsCalls(5, 1, 100); PollingRecordPublisher recordPublisher = createPollingRecordPublisher(fakeKinesis); - assertEquals(COMPLETE, recordPublisher.run(new TestConsumer())); - assertEquals(COMPLETE, recordPublisher.run(new TestConsumer())); + assertThat(recordPublisher.run(new TestConsumer())).isEqualTo(COMPLETE); + assertThat(recordPublisher.run(new TestConsumer())).isEqualTo(COMPLETE); } @Test @@ -106,7 +106,7 @@ public void testRunGetShardIteratorReturnsNullIsComplete() throws Exception { FakeKinesisBehavioursFactory.noShardsFoundForRequestedStreamsBehaviour(); PollingRecordPublisher recordPublisher = createPollingRecordPublisher(fakeKinesis); - assertEquals(COMPLETE, recordPublisher.run(new TestConsumer())); + assertThat(recordPublisher.run(new TestConsumer())).isEqualTo(COMPLETE); } @Test diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporterTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporterTest.java index 9ed4f7e8dae22..af04799a8e045 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporterTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/PollingRecordPublisherMetricsReporterTest.java @@ -25,7 +25,7 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; @@ -59,10 +59,10 @@ public void testGettersAndSetters() { metricsReporter.setRunLoopTimeNanos(4); metricsReporter.setSleepTimeMillis(5); - assertEquals(1, metricsReporter.getBytesPerRead(), 0); - assertEquals(2, metricsReporter.getLoopFrequencyHz(), 0); - assertEquals(3, metricsReporter.getMaxNumberOfRecordsPerFetch()); - assertEquals(4, metricsReporter.getRunLoopTimeNanos()); - assertEquals(5, metricsReporter.getSleepTimeMillis()); + assertThat(metricsReporter.getBytesPerRead()).isEqualTo(1); + assertThat(metricsReporter.getLoopFrequencyHz()).isEqualTo(2); + assertThat(metricsReporter.getMaxNumberOfRecordsPerFetch()).isEqualTo(3); + assertThat(metricsReporter.getRunLoopTimeNanos()).isEqualTo(4); + assertThat(metricsReporter.getSleepTimeMillis()).isEqualTo(5); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporterTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporterTest.java index cdfcd11c5d170..310f9f7f0cb77 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporterTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/metrics/ShardConsumerMetricsReporterTest.java @@ -27,8 +27,7 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -import static org.junit.Assert.assertEquals; -import static 
org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.verify; @@ -65,10 +64,10 @@ public void testGettersAndSetters() { metricsReporter.setNumberOfAggregatedRecords(3); metricsReporter.setNumberOfDeaggregatedRecords(4); - assertEquals(1, metricsReporter.getAverageRecordSizeBytes()); - assertEquals(2, metricsReporter.getMillisBehindLatest()); - assertEquals(3, metricsReporter.getNumberOfAggregatedRecords()); - assertEquals(4, metricsReporter.getNumberOfDeaggregatedRecords()); + assertThat(metricsReporter.getAverageRecordSizeBytes()).isEqualTo(1); + assertThat(metricsReporter.getMillisBehindLatest()).isEqualTo(2); + assertThat(metricsReporter.getNumberOfAggregatedRecords()).isEqualTo(3); + assertThat(metricsReporter.getNumberOfDeaggregatedRecords()).isEqualTo(4); } @Test @@ -80,6 +79,6 @@ public void testUnregister() { metricsReporter.unregister(); - assertTrue(metricGroup.isClosed()); + assertThat(metricGroup.isClosed()).isTrue(); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java index 171b678b1181d..275b1847963be 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java @@ -20,9 +20,8 @@ import org.junit.Test; import static org.apache.flink.streaming.connectors.kinesis.model.DynamoDBStreamsShardHandle.SHARDID_PREFIX; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Shard handle unit tests. 
*/ public class DynamoDBStreamsShardHandleTest { @@ -30,27 +29,27 @@ public class DynamoDBStreamsShardHandleTest { public void testIsValidShardId() { // normal form String shardId = "shardId-00000001536805703746-69688cb1"; - assertEquals(true, DynamoDBStreamsShardHandle.isValidShardId(shardId)); + assertThat(DynamoDBStreamsShardHandle.isValidShardId(shardId)).isTrue(); // short form shardId = "shardId-00000001536805703746"; - assertEquals(true, DynamoDBStreamsShardHandle.isValidShardId(shardId)); + assertThat(DynamoDBStreamsShardHandle.isValidShardId(shardId)).isTrue(); // long form shardId = "shardId-00000001536805703746-69688cb1aljkwerijfl8228sl12a123akfla"; - assertEquals(true, DynamoDBStreamsShardHandle.isValidShardId(shardId)); + assertThat(DynamoDBStreamsShardHandle.isValidShardId(shardId)).isTrue(); // invalid with wrong prefix shardId = "sId-00000001536805703746-69688cb1"; - assertEquals(false, DynamoDBStreamsShardHandle.isValidShardId(shardId)); + assertThat(DynamoDBStreamsShardHandle.isValidShardId(shardId)).isFalse(); // invalid with non-digits shardId = "shardId-0000000153680570aabb-69688cb1"; - assertEquals(false, DynamoDBStreamsShardHandle.isValidShardId(shardId)); + assertThat(DynamoDBStreamsShardHandle.isValidShardId(shardId)).isFalse(); // invalid with shardId too long shardId = "shardId-00000001536805703746-69688cb1aljkwerijfl8228sl12a123akfla0000"; - assertEquals(false, DynamoDBStreamsShardHandle.isValidShardId(shardId)); + assertThat(DynamoDBStreamsShardHandle.isValidShardId(shardId)).isFalse(); } @Test @@ -61,7 +60,8 @@ public void testCompareShardId() { String shardIdValid = "shardId-00000001536805703746-69688cb1"; String shardIdInvalid = "shardId-0000000153680570aabb-69688cb1"; - assertEquals(0, DynamoDBStreamsShardHandle.compareShardIds(shardIdValid, shardIdValid)); + assertThat(DynamoDBStreamsShardHandle.compareShardIds(shardIdValid, shardIdValid)) + .isEqualTo(0); // comparison of invalid shardIds should yield exception try { @@ -96,11 +96,12 @@ public void testCompareShardId() { } } for (int i = 1; i < numShardIds - 1; i++) { - assertTrue( - DynamoDBStreamsShardHandle.compareShardIds(shardIds[i - 1], shardIds[i]) < 0); - assertTrue(DynamoDBStreamsShardHandle.compareShardIds(shardIds[i], shardIds[i]) == 0); - assertTrue( - DynamoDBStreamsShardHandle.compareShardIds(shardIds[i], shardIds[i + 1]) < 0); + assertThat(DynamoDBStreamsShardHandle.compareShardIds(shardIds[i - 1], shardIds[i])) + .isLessThan(0); + assertThat(DynamoDBStreamsShardHandle.compareShardIds(shardIds[i], shardIds[i])) + .isEqualTo(0); + assertThat(DynamoDBStreamsShardHandle.compareShardIds(shardIds[i], shardIds[i + 1])) + .isLessThan(0); } } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumberTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumberTest.java index 1f513d8a8729d..ac5616162e6b2 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumberTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/SentinelSequenceNumberTest.java @@ -19,7 +19,7 @@ import org.junit.Test; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link SentinelSequenceNumber}. 
*/ public class SentinelSequenceNumberTest { @@ -27,7 +27,7 @@ public class SentinelSequenceNumberTest { @Test public void allSentinelNumbersAreRecognized() { for (SentinelSequenceNumber sentinel : SentinelSequenceNumber.values()) { - assertTrue(SentinelSequenceNumber.isSentinelSequenceNumber(sentinel.get())); + assertThat(SentinelSequenceNumber.isSentinelSequenceNumber(sentinel.get())).isTrue(); } } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPositionTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPositionTest.java index 9e3b9b2664190..d29744a3f6a28 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPositionTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StartingPositionTest.java @@ -24,8 +24,7 @@ import java.util.Date; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link StartingPosition}. */ public class StartingPositionTest { @@ -36,48 +35,50 @@ public class StartingPositionTest { public void testStartingPositionFromTimestamp() { Date date = new Date(); StartingPosition position = StartingPosition.fromTimestamp(date); - assertEquals(ShardIteratorType.AT_TIMESTAMP, position.getShardIteratorType()); - assertEquals(date, position.getStartingMarker()); + assertThat(position.getShardIteratorType()).isEqualTo(ShardIteratorType.AT_TIMESTAMP); + assertThat(position.getStartingMarker()).isEqualTo(date); } @Test public void testStartingPositionRestartFromSequenceNumber() { SequenceNumber sequenceNumber = new SequenceNumber("100"); StartingPosition position = StartingPosition.restartFromSequenceNumber(sequenceNumber); - assertEquals(ShardIteratorType.AFTER_SEQUENCE_NUMBER, position.getShardIteratorType()); - assertEquals("100", position.getStartingMarker()); + assertThat(position.getShardIteratorType()) + .isEqualTo(ShardIteratorType.AFTER_SEQUENCE_NUMBER); + assertThat(position.getStartingMarker()).isEqualTo("100"); } @Test public void testStartingPositionRestartFromAggregatedSequenceNumber() { SequenceNumber sequenceNumber = new SequenceNumber("200", 3); StartingPosition position = StartingPosition.restartFromSequenceNumber(sequenceNumber); - assertEquals(ShardIteratorType.AT_SEQUENCE_NUMBER, position.getShardIteratorType()); - assertEquals("200", position.getStartingMarker()); + assertThat(position.getShardIteratorType()).isEqualTo(ShardIteratorType.AT_SEQUENCE_NUMBER); + assertThat(position.getStartingMarker()).isEqualTo("200"); } @Test public void testStartingPositionContinueFromAggregatedSequenceNumber() { SequenceNumber sequenceNumber = new SequenceNumber("200", 3); StartingPosition position = StartingPosition.continueFromSequenceNumber(sequenceNumber); - assertEquals(ShardIteratorType.AFTER_SEQUENCE_NUMBER, position.getShardIteratorType()); - assertEquals("200", position.getStartingMarker()); + assertThat(position.getShardIteratorType()) + .isEqualTo(ShardIteratorType.AFTER_SEQUENCE_NUMBER); + assertThat(position.getStartingMarker()).isEqualTo("200"); } @Test public void testStartingPositionRestartFromSentinelEarliest() { SequenceNumber sequenceNumber = SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get(); StartingPosition position = 
StartingPosition.restartFromSequenceNumber(sequenceNumber); - assertEquals(ShardIteratorType.TRIM_HORIZON, position.getShardIteratorType()); - assertNull(position.getStartingMarker()); + assertThat(position.getShardIteratorType()).isEqualTo(ShardIteratorType.TRIM_HORIZON); + assertThat(position.getStartingMarker()).isNull(); } @Test public void testStartingPositionRestartFromSentinelLatest() { SequenceNumber sequenceNumber = SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM.get(); StartingPosition position = StartingPosition.restartFromSequenceNumber(sequenceNumber); - assertEquals(ShardIteratorType.LATEST, position.getShardIteratorType()); - assertNull(position.getStartingMarker()); + assertThat(position.getShardIteratorType()).isEqualTo(ShardIteratorType.LATEST); + assertThat(position.getStartingMarker()).isNull(); } @Test @@ -87,7 +88,7 @@ public void testStartingPositionRestartFromSentinelEnding() { SequenceNumber sequenceNumber = SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(); StartingPosition position = StartingPosition.restartFromSequenceNumber(sequenceNumber); - assertEquals(ShardIteratorType.LATEST, position.getShardIteratorType()); - assertNull(position.getStartingMarker()); + assertThat(position.getShardIteratorType()).isEqualTo(ShardIteratorType.LATEST); + assertThat(position.getStartingMarker()).isNull(); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandleTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandleTest.java index 43f52048deac5..3dab20a046f43 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandleTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/StreamShardHandleTest.java @@ -19,23 +19,23 @@ import org.junit.Test; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThan; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** Test for methods in the {@link StreamShardHandle} class. 
*/ public class StreamShardHandleTest { @Test public void testCompareShardIds() { assertThat( - StreamShardHandle.compareShardIds("shardId-000000000001", "shardId-000000000010"), - lessThan(0)); + StreamShardHandle.compareShardIds( + "shardId-000000000001", "shardId-000000000010")) + .isLessThan(0); assertThat( - StreamShardHandle.compareShardIds("shardId-000000000010", "shardId-000000000010"), - equalTo(0)); + StreamShardHandle.compareShardIds( + "shardId-000000000010", "shardId-000000000010")) + .isEqualTo(0); assertThat( - StreamShardHandle.compareShardIds("shardId-000000000015", "shardId-000000000010"), - greaterThan(0)); + StreamShardHandle.compareShardIds( + "shardId-000000000015", "shardId-000000000010")) + .isGreaterThan(0); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java index 11da7aedc4540..3f2e40f40417f 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java @@ -42,7 +42,6 @@ import org.apache.http.conn.ConnectTimeoutException; import org.hamcrest.Description; import org.hamcrest.TypeSafeDiagnosingMatcher; -import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; @@ -62,12 +61,11 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.hamcrest.MatcherAssert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.collection.IsCollectionWithSize.hasSize; import static org.hamcrest.collection.IsIterableContainingInAnyOrder.containsInAnyOrder; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -90,7 +88,7 @@ public void testIsRecoverableExceptionWithConnectError() throws UnknownHostExcep InetAddress.getByAddress( "kinesis.us-east-1.amazonaws.com", new byte[] {3, 91, (byte) 171, (byte) 253}))); - assertTrue(kinesisProxy.isRecoverableSdkClientException(ex)); + assertThat(kinesisProxy.isRecoverableSdkClientException(ex)).isTrue(); } @Test @@ -98,28 +96,28 @@ public void testIsRecoverableExceptionWithProvisionedThroughputExceeded() { final ProvisionedThroughputExceededException ex = new ProvisionedThroughputExceededException("asdf"); ex.setErrorType(ErrorType.Client); - assertTrue(KinesisProxy.isRecoverableException(ex)); + assertThat(KinesisProxy.isRecoverableException(ex)).isTrue(); } @Test public void testIsRecoverableExceptionWithServiceException() { final AmazonServiceException ex = new AmazonServiceException("asdf"); ex.setErrorType(ErrorType.Service); - assertTrue(KinesisProxy.isRecoverableException(ex)); + assertThat(KinesisProxy.isRecoverableException(ex)).isTrue(); } @Test public void testIsRecoverableExceptionWithExpiredIteratorException() { final ExpiredIteratorException ex = new ExpiredIteratorException("asdf"); ex.setErrorType(ErrorType.Client); - 
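
The shard-id comparison tests above map Hamcrest's lessThan, equalTo, and greaterThan matchers onto AssertJ's numeric assertions. A compact sketch of that pattern, using String.compareTo as a stand-in for the compareShardIds helpers in the tests.

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;

public class ComparisonAssertionExample {

    @Test
    public void compareResults() {
        // compareTo returns a negative, zero, or positive int, like compareShardIds above.
        assertThat("shardId-000000000001".compareTo("shardId-000000000010")).isLessThan(0);
        assertThat("shardId-000000000010".compareTo("shardId-000000000010")).isEqualTo(0);
        assertThat("shardId-000000000015".compareTo("shardId-000000000010")).isGreaterThan(0);
    }
}
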
assertFalse(KinesisProxy.isRecoverableException(ex)); + assertThat(KinesisProxy.isRecoverableException(ex)).isFalse(); } @Test public void testIsRecoverableExceptionWithNullErrorType() { final AmazonServiceException ex = new AmazonServiceException("asdf"); ex.setErrorType(null); - assertFalse(KinesisProxy.isRecoverableException(ex)); + assertThat(KinesisProxy.isRecoverableException(ex)).isFalse(); } @Test @@ -153,8 +151,8 @@ public GetRecordsResult answer(InvocationOnMock invocation) Whitebox.getField(KinesisProxy.class, "kinesisClient").set(kinesisProxy, mockClient); GetRecordsResult result = kinesisProxy.getRecords("fakeShardIterator", 1); - assertEquals(retriableExceptions.length, retries.intValue()); - assertEquals(expectedResult, result); + assertThat(retries.intValue()).isEqualTo(retriableExceptions.length); + assertThat(result).isEqualTo(expectedResult); } @Test @@ -191,15 +189,15 @@ public void testGetShardList() throws Exception { Arrays.asList(fakeStreamName)); GetShardListResult shardListResult = kinesisProxy.getShardList(streamHashMap); - Assert.assertEquals(shardListResult.hasRetrievedShards(), true); + assertThat(true).isEqualTo(shardListResult.hasRetrievedShards()); Set expectedStreams = new HashSet<>(); expectedStreams.add(fakeStreamName); - Assert.assertEquals(shardListResult.getStreamsWithRetrievedShards(), expectedStreams); + assertThat(expectedStreams).isEqualTo(shardListResult.getStreamsWithRetrievedShards()); List actualShardList = shardListResult.getRetrievedShardListOfStream(fakeStreamName); List expectedStreamShard = new ArrayList<>(); - assertThat(actualShardList, hasSize(4)); + assertThat(actualShardList).satisfies(matching(hasSize(4))); for (int i = 0; i < 4; i++) { StreamShardHandle shardHandle = new StreamShardHandle( @@ -210,11 +208,12 @@ public void testGetShardList() throws Exception { expectedStreamShard.add(shardHandle); } - Assert.assertThat( - actualShardList, - containsInAnyOrder( - expectedStreamShard.toArray( - new StreamShardHandle[actualShardList.size()]))); + assertThat(actualShardList) + .satisfies( + matching( + containsInAnyOrder( + expectedStreamShard.toArray( + new StreamShardHandle[actualShardList.size()])))); } @Test @@ -246,15 +245,15 @@ public void testGetShardListWithNewShardsOnSecondRun() throws Exception { GetShardListResult shardListResult = kinesisProxy.getShardList(streamHashMap); // then - Assert.assertTrue(shardListResult.hasRetrievedShards()); + assertThat(shardListResult.hasRetrievedShards()).isTrue(); Set expectedStreams = new HashSet<>(); expectedStreams.add(fakeStreamName); - Assert.assertEquals(shardListResult.getStreamsWithRetrievedShards(), expectedStreams); + assertThat(expectedStreams).isEqualTo(shardListResult.getStreamsWithRetrievedShards()); List actualShardList = shardListResult.getRetrievedShardListOfStream(fakeStreamName); - Assert.assertThat(actualShardList, hasSize(2)); + assertThat(actualShardList).satisfies(matching(hasSize(2))); List expectedStreamShard = IntStream.range(0, actualShardList.size()) @@ -269,11 +268,12 @@ public void testGetShardListWithNewShardsOnSecondRun() throws Exception { i)))) .collect(Collectors.toList()); - Assert.assertThat( - actualShardList, - containsInAnyOrder( - expectedStreamShard.toArray( - new StreamShardHandle[actualShardList.size()]))); + assertThat(actualShardList) + .satisfies( + matching( + containsInAnyOrder( + expectedStreamShard.toArray( + new StreamShardHandle[actualShardList.size()])))); // given new shards ListShardsResult responseSecond = @@ -291,12 +291,12 @@ public 
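
Where the KinesisProxyTest hunks above still rely on Hamcrest matchers (hasSize, containsInAnyOrder), they are wrapped in satisfies(matching(...)), AssertJ's bridge for reusing existing Hamcrest matchers. The same checks can also be written with AssertJ's own collection assertions. A sketch of both options, assuming AssertJ and hamcrest-core are both on the test classpath; the shard names are placeholders.

import java.util.Arrays;
import java.util.List;

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.HamcrestCondition.matching;
import static org.hamcrest.collection.IsCollectionWithSize.hasSize;
import static org.hamcrest.collection.IsIterableContainingInAnyOrder.containsInAnyOrder;

public class HamcrestBridgeExample {

    @Test
    public void bridgeAndNativeStyles() {
        List<String> shards = Arrays.asList("shard-0", "shard-1"); // stand-in for retrieved shard handles

        // Reuse existing Hamcrest matchers through AssertJ's HamcrestCondition bridge.
        assertThat(shards).satisfies(matching(hasSize(2)));
        assertThat(shards).satisfies(matching(containsInAnyOrder("shard-1", "shard-0")));

        // Equivalent checks with AssertJ's native collection assertions.
        assertThat(shards).hasSize(2);
        assertThat(shards).containsExactlyInAnyOrder("shard-1", "shard-0");
    }
}
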
void testGetShardListWithNewShardsOnSecondRun() throws Exception { GetShardListResult newShardListResult = kinesisProxy.getShardList(streamHashMap); // then new shards - Assert.assertTrue(newShardListResult.hasRetrievedShards()); - Assert.assertEquals(newShardListResult.getStreamsWithRetrievedShards(), expectedStreams); + assertThat(newShardListResult.hasRetrievedShards()).isTrue(); + assertThat(expectedStreams).isEqualTo(newShardListResult.getStreamsWithRetrievedShards()); List newActualShardList = newShardListResult.getRetrievedShardListOfStream(fakeStreamName); - Assert.assertThat(newActualShardList, hasSize(1)); + assertThat(newActualShardList).satisfies(matching(hasSize(1))); List newExpectedStreamShard = Collections.singletonList( @@ -307,11 +307,13 @@ public void testGetShardListWithNewShardsOnSecondRun() throws Exception { KinesisShardIdGenerator.generateFromShardOrder( 2)))); - Assert.assertThat( - newActualShardList, - containsInAnyOrder( - newExpectedStreamShard.toArray( - new StreamShardHandle[newActualShardList.size()]))); + assertThat(newActualShardList) + .satisfies( + matching( + containsInAnyOrder( + newExpectedStreamShard.toArray( + new StreamShardHandle + [newActualShardList.size()])))); } @Test @@ -337,7 +339,7 @@ public void testGetShardWithNoNewShards() throws Exception { GetShardListResult shardListResult = kinesisProxy.getShardList(streamHashMap); // then - Assert.assertFalse(shardListResult.hasRetrievedShards()); + assertThat(shardListResult.hasRetrievedShards()).isFalse(); } @Test @@ -378,11 +380,10 @@ public ListShardsResult answer(InvocationOnMock invocation) HashMap streamNames = new HashMap(); streamNames.put("fake-stream", null); GetShardListResult result = kinesisProxy.getShardList(streamNames); - assertEquals(retriableExceptions.length, exceptionCount.intValue()); - assertEquals(true, result.hasRetrievedShards()); - assertEquals( - shard.getShardId(), - result.getLastSeenShardOfStream("fake-stream").getShard().getShardId()); + assertThat(exceptionCount.intValue()).isEqualTo(retriableExceptions.length); + assertThat(result.hasRetrievedShards()).isTrue(); + assertThat(result.getLastSeenShardOfStream("fake-stream").getShard().getShardId()) + .isEqualTo(shard.getShardId()); // test max attempt count exceeded int maxRetries = 1; @@ -393,11 +394,11 @@ public ListShardsResult answer(InvocationOnMock invocation) Whitebox.getField(KinesisProxy.class, "kinesisClient").set(kinesisProxy, mockClient); try { kinesisProxy.getShardList(streamNames); - Assert.fail("exception expected"); + fail("exception expected"); } catch (SdkClientException ex) { - assertEquals(retriableExceptions[maxRetries], ex); + assertThat(ex).isEqualTo(retriableExceptions[maxRetries]); } - assertEquals(maxRetries + 1, exceptionCount.intValue()); + assertThat(exceptionCount.intValue()).isEqualTo(maxRetries + 1); } @Test @@ -417,7 +418,7 @@ protected AmazonKinesis createKinesisClient(Properties configProps) { AmazonKinesis kinesisClient = Whitebox.getInternalState(proxy, "kinesisClient"); ClientConfiguration clientConfiguration = Whitebox.getInternalState(kinesisClient, "clientConfiguration"); - assertEquals(10000, clientConfiguration.getSocketTimeout()); + assertThat(clientConfiguration.getSocketTimeout()).isEqualTo(10000); } @Test @@ -432,7 +433,7 @@ public void testClientConfigOverride() { AmazonKinesis kinesisClient = Whitebox.getInternalState(proxy, "kinesisClient"); ClientConfiguration clientConfiguration = Whitebox.getInternalState(kinesisClient, "clientConfiguration"); - assertEquals(9999, 
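
The retry test above keeps the JUnit-style try/fail/catch block and only converts the assertions inside it. AssertJ's assertThatThrownBy can express the same intent in one statement; this is an alternative sketch rather than what the patch does, and the failing call and exception type below are hypothetical placeholders, not the proxy call from the test.

import org.junit.Test;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.assertj.core.api.Assertions.catchThrowable;

public class ExpectedExceptionExample {

    private static String failingCall() {
        throw new IllegalStateException("retries exhausted"); // stand-in for the call that should fail
    }

    @Test
    public void exceptionIsCapturedAndInspected() {
        // Replaces: try { failingCall(); fail("exception expected"); } catch (IllegalStateException ex) { ... }
        assertThatThrownBy(ExpectedExceptionExample::failingCall)
                .isInstanceOf(IllegalStateException.class)
                .hasMessage("retries exhausted");

        // The thrown exception can also be captured for further assertions.
        Throwable thrown = catchThrowable(ExpectedExceptionExample::failingCall);
        assertThat(thrown).isInstanceOf(IllegalStateException.class);
    }
}
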
clientConfiguration.getSocketTimeout()); + assertThat(clientConfiguration.getSocketTimeout()).isEqualTo(9999); } protected static HashMap diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2FactoryTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2FactoryTest.java index 6095b2017ea4f..fe15035154df9 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2FactoryTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2FactoryTest.java @@ -20,7 +20,6 @@ import org.apache.flink.streaming.connectors.kinesis.config.AWSConfigConstants; import org.apache.flink.streaming.connectors.kinesis.testutils.TestUtils; -import org.junit.Assert; import org.junit.Test; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.internal.NettyConfiguration; @@ -30,7 +29,7 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.DEFAULT_EFO_HTTP_CLIENT_READ_TIMEOUT; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_HTTP_CLIENT_READ_TIMEOUT_MILLIS; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; /** Test for methods in the {@link KinesisProxyV2Factory} class. */ public class KinesisProxyV2FactoryTest { @@ -42,9 +41,8 @@ public void testReadTimeoutPopulatedFromDefaults() throws Exception { KinesisProxyV2Interface proxy = KinesisProxyV2Factory.createKinesisProxyV2(properties); NettyConfiguration nettyConfiguration = getNettyConfiguration(proxy); - assertEquals( - DEFAULT_EFO_HTTP_CLIENT_READ_TIMEOUT.toMillis(), - nettyConfiguration.readTimeoutMillis()); + assertThat(nettyConfiguration.readTimeoutMillis()) + .isEqualTo(DEFAULT_EFO_HTTP_CLIENT_READ_TIMEOUT.toMillis()); } @Test @@ -55,7 +53,7 @@ public void testReadTimeoutPopulatedFromProperties() throws Exception { KinesisProxyV2Interface proxy = KinesisProxyV2Factory.createKinesisProxyV2(properties); NettyConfiguration nettyConfiguration = getNettyConfiguration(proxy); - assertEquals(12345, nettyConfiguration.readTimeoutMillis()); + assertThat(nettyConfiguration.readTimeoutMillis()).isEqualTo(12345); } @Test @@ -65,7 +63,7 @@ public void testClientConfigurationPopulatedTcpKeepAliveDefaults() throws Except KinesisProxyV2Interface proxy = KinesisProxyV2Factory.createKinesisProxyV2(properties); NettyConfiguration nettyConfiguration = getNettyConfiguration(proxy); - Assert.assertTrue(nettyConfiguration.tcpKeepAlive()); + assertThat(nettyConfiguration.tcpKeepAlive()).isTrue(); } private NettyConfiguration getNettyConfiguration(final KinesisProxyV2Interface kinesis) diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java index 51c4ccfb1eb00..d03ba8cfdd3a9 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyV2Test.java @@ -60,7 +60,7 @@ 
import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_BASE; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_EXPONENTIAL_CONSTANT; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SUBSCRIBE_TO_SHARD_BACKOFF_MAX; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; @@ -146,11 +146,11 @@ public void testRegisterStreamConsumer() throws Exception { RegisterStreamConsumerResponse actual = proxy.registerStreamConsumer("arn", "name"); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); RegisterStreamConsumerRequest request = requestCaptor.getValue(); - assertEquals("arn", request.streamARN()); - assertEquals("name", request.consumerName()); + assertThat(request.streamARN()).isEqualTo("arn"); + assertThat(request.consumerName()).isEqualTo("name"); } @Test @@ -198,10 +198,10 @@ public void testDeregisterStreamConsumer() throws Exception { DeregisterStreamConsumerResponse actual = proxy.deregisterStreamConsumer("arn"); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); DeregisterStreamConsumerRequest request = requestCaptor.getValue(); - assertEquals("arn", request.consumerARN()); + assertThat(request.consumerARN()).isEqualTo("arn"); } @Test @@ -248,10 +248,10 @@ public void testDescribeStreamConsumerWithStreamConsumerArn() throws Exception { DescribeStreamConsumerResponse actual = proxy.describeStreamConsumer("arn"); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); DescribeStreamConsumerRequest request = requestCaptor.getValue(); - assertEquals("arn", request.consumerARN()); + assertThat(request.consumerARN()).isEqualTo("arn"); } @Test @@ -273,11 +273,11 @@ public void testDescribeStreamConsumerWithStreamArnAndConsumerName() throws Exce DescribeStreamConsumerResponse actual = proxy.describeStreamConsumer("arn", "name"); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); DescribeStreamConsumerRequest request = requestCaptor.getValue(); - assertEquals("arn", request.streamARN()); - assertEquals("name", request.consumerName()); + assertThat(request.streamARN()).isEqualTo("arn"); + assertThat(request.consumerName()).isEqualTo("name"); } @Test @@ -324,10 +324,10 @@ public void testDescribeStreamSummary() throws Exception { DescribeStreamSummaryResponse actual = proxy.describeStreamSummary("stream"); - assertEquals(expected, actual); + assertThat(actual).isEqualTo(expected); DescribeStreamSummaryRequest request = requestCaptor.getValue(); - assertEquals("stream", request.streamName()); + assertThat(request.streamName()).isEqualTo("stream"); } @Test diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisDynamicTableFactoryTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisDynamicTableFactoryTest.java index beb5827fb6227..3db069ca7afc7 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisDynamicTableFactoryTest.java +++ 
b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/table/KinesisDynamicTableFactoryTest.java @@ -56,9 +56,7 @@ import static org.apache.flink.streaming.connectors.kinesis.table.RowDataKinesisDeserializationSchema.Metadata.Timestamp; import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink; import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSource; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; +import static org.assertj.core.api.Assertions.assertThat; /** * Test for {@link KinesisDynamicSource} and {@link KinesisDynamicSink} created by {@link @@ -92,17 +90,17 @@ public void testGoodTableSource() { new TestFormatFactory.DecodingFormatMock(",", true)); // verify that the constructed DynamicTableSink is as expected - assertEquals(expectedSource, actualSource); + assertThat(actualSource).isEqualTo(expectedSource); // verify that the copy of the constructed DynamicTableSink is as expected - assertEquals(expectedSource, actualSource.copy()); + assertThat(actualSource.copy()).isEqualTo(expectedSource); // verify produced sink ScanTableSource.ScanRuntimeProvider functionProvider = actualSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE); SourceFunction sourceFunction = as(functionProvider, SourceFunctionProvider.class).createSourceFunction(); - assertThat(sourceFunction, instanceOf(FlinkKinesisConsumer.class)); + assertThat(sourceFunction).isInstanceOf(FlinkKinesisConsumer.class); } @Test @@ -130,10 +128,10 @@ public void testGoodTableSourceWithMetadataFields() { Arrays.asList(requestedMetadata)); // verify that the constructed DynamicTableSource is as expected - assertEquals(expectedSource, actualSource); + assertThat(actualSource).isEqualTo(expectedSource); // verify that the copy of the constructed DynamicTableSink is as expected - assertEquals(expectedSource, actualSource.copy()); + assertThat(actualSource.copy()).isEqualTo(expectedSource); } @Test @@ -271,7 +269,7 @@ private DataType getProducedType(ResolvedSchema schema, Metadata... 
requestedMet } private T as(Object object, Class clazz) { - assertThat(object, instanceOf(clazz)); + assertThat(object).isInstanceOf(clazz); return clazz.cast(object); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java index 88da61c3cfae4..17f7d436c9c2c 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/testutils/FakeKinesisFanOutBehavioursFactory.java @@ -54,8 +54,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -417,8 +416,8 @@ public int getNumberOfSubscribeToShardInvocations() { } public StartingPosition getStartingPositionForSubscription(final int subscriptionIndex) { - assertTrue(subscriptionIndex >= 0); - assertTrue(subscriptionIndex < getNumberOfSubscribeToShardInvocations()); + assertThat(subscriptionIndex).isGreaterThanOrEqualTo(0); + assertThat(subscriptionIndex).isLessThan(getNumberOfSubscribeToShardInvocations()); return requests.get(subscriptionIndex).startingPosition(); } @@ -519,7 +518,7 @@ public DescribeStreamSummaryResponse describeStreamSummary(String stream) public RegisterStreamConsumerResponse registerStreamConsumer( String streamArn, String consumerName) throws InterruptedException, ExecutionException { - assertEquals(STREAM_ARN, streamArn); + assertThat(streamArn).isEqualTo(STREAM_ARN); streamConsumerNotFound = false; streamConsumerArn = STREAM_CONSUMER_ARN_NEW; @@ -544,7 +543,7 @@ public DeregisterStreamConsumerResponse deregisterStreamConsumer(final String co public DescribeStreamConsumerResponse describeStreamConsumer( final String streamArn, final String consumerName) throws InterruptedException, ExecutionException { - assertEquals(STREAM_ARN, streamArn); + assertThat(streamArn).isEqualTo(STREAM_ARN); numberOfDescribeStreamConsumerInvocations++; @@ -574,7 +573,7 @@ public DescribeStreamConsumerResponse describeStreamConsumer( @Override public DescribeStreamConsumerResponse describeStreamConsumer(String streamConsumerArn) throws InterruptedException, ExecutionException { - assertEquals(this.streamConsumerArn, streamConsumerArn); + assertThat(streamConsumerArn).isEqualTo(this.streamConsumerArn); return describeStreamConsumer(STREAM_ARN, "consumer-name"); } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java index 94733304b288d..fd2b490b65273 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AWSUtilTest.java @@ -44,10 
+44,7 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP; import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM; import static org.apache.flink.streaming.connectors.kinesis.model.SentinelSequenceNumber.SENTINEL_LATEST_SEQUENCE_NUM; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for AWSUtil. */ @RunWith(PowerMockRunner.class) @@ -62,7 +59,7 @@ public void testDefaultCredentialsProvider() { AWSCredentialsProvider credentialsProvider = AWSUtil.getCredentialsProvider(testConfig); - assertTrue(credentialsProvider instanceof DefaultAWSCredentialsProviderChain); + assertThat(credentialsProvider).isInstanceOf(DefaultAWSCredentialsProviderChain.class); } @Test @@ -71,7 +68,7 @@ public void testGetCredentialsProvider() { testConfig.setProperty(AWS_CREDENTIALS_PROVIDER, "WEB_IDENTITY_TOKEN"); AWSCredentialsProvider credentialsProvider = AWSUtil.getCredentialsProvider(testConfig); - assertTrue(credentialsProvider instanceof WebIdentityTokenCredentialsProvider); + assertThat(credentialsProvider).isInstanceOf(WebIdentityTokenCredentialsProvider.class); } @Test @@ -81,7 +78,7 @@ public void testGetCredentialsProviderEnvironmentVariables() { AWSCredentialsProvider credentialsProvider = AWSUtil.getCredentialsProvider(testConfig); - assertTrue(credentialsProvider instanceof EnvironmentVariableCredentialsProvider); + assertThat(credentialsProvider).isInstanceOf(EnvironmentVariableCredentialsProvider.class); } @Test @@ -91,7 +88,7 @@ public void testGetCredentialsProviderSystemProperties() { AWSCredentialsProvider credentialsProvider = AWSUtil.getCredentialsProvider(testConfig); - assertTrue(credentialsProvider instanceof SystemPropertiesCredentialsProvider); + assertThat(credentialsProvider).isInstanceOf(SystemPropertiesCredentialsProvider.class); } @Test @@ -104,8 +101,8 @@ public void testGetCredentialsProviderBasic() { AWSCredentials credentials = AWSUtil.getCredentialsProvider(testConfig).getCredentials(); - assertEquals("ak", credentials.getAWSAccessKeyId()); - assertEquals("sk", credentials.getAWSSecretKey()); + assertThat(credentials.getAWSAccessKeyId()).isEqualTo("ak"); + assertThat(credentials.getAWSSecretKey()).isEqualTo("sk"); } @Test @@ -115,7 +112,7 @@ public void testGetCredentialsProviderAuto() { AWSCredentialsProvider credentialsProvider = AWSUtil.getCredentialsProvider(testConfig); - assertTrue(credentialsProvider instanceof DefaultAWSCredentialsProviderChain); + assertThat(credentialsProvider).isInstanceOf(DefaultAWSCredentialsProviderChain.class); } @Test @@ -139,11 +136,12 @@ public void testGetCredentialsProviderProfile() { AWSCredentialsProvider credentialsProvider = AWSUtil.getCredentialsProvider(testConfig); - assertTrue(credentialsProvider instanceof ProfileCredentialsProvider); + assertThat(credentialsProvider).isInstanceOf(ProfileCredentialsProvider.class); AWSCredentials credentials = credentialsProvider.getCredentials(); - assertEquals("11111111111111111111", credentials.getAWSAccessKeyId()); - assertEquals("wJalrXUtnFEMI/K7MDENG/bPxRfiCY1111111111", credentials.getAWSSecretKey()); + assertThat(credentials.getAWSAccessKeyId()).isEqualTo("11111111111111111111"); + assertThat(credentials.getAWSSecretKey()) + 
.isEqualTo("wJalrXUtnFEMI/K7MDENG/bPxRfiCY1111111111"); } @Test @@ -157,21 +155,22 @@ public void testGetCredentialsProviderNamedProfile() { AWSCredentialsProvider credentialsProvider = AWSUtil.getCredentialsProvider(testConfig); - assertTrue(credentialsProvider instanceof ProfileCredentialsProvider); + assertThat(credentialsProvider).isInstanceOf(ProfileCredentialsProvider.class); AWSCredentials credentials = credentialsProvider.getCredentials(); - assertEquals("22222222222222222222", credentials.getAWSAccessKeyId()); - assertEquals("wJalrXUtnFEMI/K7MDENG/bPxRfiCY2222222222", credentials.getAWSSecretKey()); + assertThat(credentials.getAWSAccessKeyId()).isEqualTo("22222222222222222222"); + assertThat(credentials.getAWSSecretKey()) + .isEqualTo("wJalrXUtnFEMI/K7MDENG/bPxRfiCY2222222222"); } @Test public void testValidRegion() { - assertTrue(AWSUtil.isValidRegion("us-east-1")); + assertThat(AWSUtil.isValidRegion("us-east-1")).isTrue(); } @Test public void testInvalidRegion() { - assertFalse(AWSUtil.isValidRegion("ur-east-1")); + assertThat(AWSUtil.isValidRegion("ur-east-1")).isFalse(); } @Test @@ -179,8 +178,8 @@ public void testGetStartingPositionForLatest() { StartingPosition position = AWSUtil.getStartingPosition(SENTINEL_LATEST_SEQUENCE_NUM.get(), new Properties()); - assertEquals(AT_TIMESTAMP, position.getShardIteratorType()); - assertNotNull(position.getStartingMarker()); + assertThat(position.getShardIteratorType()).isEqualTo(AT_TIMESTAMP); + assertThat(position.getStartingMarker()).isNotNull(); } @Test @@ -196,7 +195,7 @@ public void testGetStartingPositionForTimestamp() throws Exception { AWSUtil.getStartingPosition( SENTINEL_AT_TIMESTAMP_SEQUENCE_NUM.get(), consumerProperties); - assertEquals(AT_TIMESTAMP, position.getShardIteratorType()); - assertEquals(expectedTimestamp, position.getStartingMarker()); + assertThat(position.getShardIteratorType()).isEqualTo(AT_TIMESTAMP); + assertThat(position.getStartingMarker()).isEqualTo(expectedTimestamp); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java index ea9f03d49729c..79cd7462c23b9 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/AwsV2UtilTest.java @@ -35,9 +35,7 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.POLLING; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.assertj.core.api.Assertions.assertThat; /** Tests for {@link AwsV2Util}. 
*/ public class AwsV2UtilTest { @@ -50,9 +48,8 @@ public void testConvertMaxConcurrency() { AttributeMap convertedProperties = AwsV2Util.convertProperties(properties); - assertEquals( - maxConcurrency, - convertedProperties.get(SdkHttpConfigurationOption.MAX_CONNECTIONS)); + assertThat(convertedProperties.get(SdkHttpConfigurationOption.MAX_CONNECTIONS)) + .isEqualTo(maxConcurrency); } @Test @@ -65,7 +62,8 @@ public void testConvertReadTimeout() { AttributeMap convertedProperties = AwsV2Util.convertProperties(properties); - assertEquals(readTimeout, convertedProperties.get(SdkHttpConfigurationOption.READ_TIMEOUT)); + assertThat(convertedProperties.get(SdkHttpConfigurationOption.READ_TIMEOUT)) + .isEqualTo(readTimeout); } @Test @@ -74,95 +72,96 @@ public void testConvertEmpty() { AttributeMap convertedProperties = AwsV2Util.convertProperties(properties); - assertEquals(AttributeMap.empty(), convertedProperties); + assertThat(convertedProperties).isEqualTo(AttributeMap.empty()); } @Test public void testIsUsingEfoRecordPublisher() { Properties prop = new Properties(); - assertFalse(AwsV2Util.isUsingEfoRecordPublisher(prop)); + assertThat(AwsV2Util.isUsingEfoRecordPublisher(prop)).isFalse(); prop.setProperty(RECORD_PUBLISHER_TYPE, EFO.name()); - assertTrue(AwsV2Util.isUsingEfoRecordPublisher(prop)); + assertThat(AwsV2Util.isUsingEfoRecordPublisher(prop)).isTrue(); prop.setProperty(RECORD_PUBLISHER_TYPE, POLLING.name()); - assertFalse(AwsV2Util.isUsingEfoRecordPublisher(prop)); + assertThat(AwsV2Util.isUsingEfoRecordPublisher(prop)).isFalse(); } @Test public void testIsEagerEfoRegistrationType() { Properties prop = new Properties(); - assertFalse(AwsV2Util.isEagerEfoRegistrationType(prop)); + assertThat(AwsV2Util.isEagerEfoRegistrationType(prop)).isFalse(); prop.setProperty(EFO_REGISTRATION_TYPE, EAGER.name()); - assertTrue(AwsV2Util.isEagerEfoRegistrationType(prop)); + assertThat(AwsV2Util.isEagerEfoRegistrationType(prop)).isTrue(); prop.setProperty(EFO_REGISTRATION_TYPE, LAZY.name()); - assertFalse(AwsV2Util.isEagerEfoRegistrationType(prop)); + assertThat(AwsV2Util.isEagerEfoRegistrationType(prop)).isFalse(); prop.setProperty(EFO_REGISTRATION_TYPE, NONE.name()); - assertFalse(AwsV2Util.isEagerEfoRegistrationType(prop)); + assertThat(AwsV2Util.isEagerEfoRegistrationType(prop)).isFalse(); } @Test public void testIsLazyEfoRegistrationType() { Properties prop = new Properties(); - assertTrue(AwsV2Util.isLazyEfoRegistrationType(prop)); + assertThat(AwsV2Util.isLazyEfoRegistrationType(prop)).isTrue(); prop.setProperty(EFO_REGISTRATION_TYPE, EAGER.name()); - assertFalse(AwsV2Util.isLazyEfoRegistrationType(prop)); + assertThat(AwsV2Util.isLazyEfoRegistrationType(prop)).isFalse(); prop.setProperty(EFO_REGISTRATION_TYPE, LAZY.name()); - assertTrue(AwsV2Util.isLazyEfoRegistrationType(prop)); + assertThat(AwsV2Util.isLazyEfoRegistrationType(prop)).isTrue(); prop.setProperty(EFO_REGISTRATION_TYPE, NONE.name()); - assertFalse(AwsV2Util.isLazyEfoRegistrationType(prop)); + assertThat(AwsV2Util.isLazyEfoRegistrationType(prop)).isFalse(); } @Test public void testIsNoneEfoRegistrationType() { Properties prop = new Properties(); - assertFalse(AwsV2Util.isNoneEfoRegistrationType(prop)); + assertThat(AwsV2Util.isNoneEfoRegistrationType(prop)).isFalse(); prop.setProperty(EFO_REGISTRATION_TYPE, EAGER.name()); - assertFalse(AwsV2Util.isNoneEfoRegistrationType(prop)); + assertThat(AwsV2Util.isNoneEfoRegistrationType(prop)).isFalse(); prop.setProperty(EFO_REGISTRATION_TYPE, LAZY.name()); - 
assertFalse(AwsV2Util.isNoneEfoRegistrationType(prop)); + assertThat(AwsV2Util.isNoneEfoRegistrationType(prop)).isFalse(); prop.setProperty(EFO_REGISTRATION_TYPE, NONE.name()); - assertTrue(AwsV2Util.isNoneEfoRegistrationType(prop)); + assertThat(AwsV2Util.isNoneEfoRegistrationType(prop)).isTrue(); } @Test public void testIsRecoverableExceptionForRecoverable() { Exception recoverable = LimitExceededException.builder().build(); - assertTrue(AwsV2Util.isRecoverableException(new ExecutionException(recoverable))); + assertThat(AwsV2Util.isRecoverableException(new ExecutionException(recoverable))).isTrue(); } @Test public void testIsRecoverableExceptionForNonRecoverable() { Exception nonRecoverable = new IllegalArgumentException("abc"); - assertFalse(AwsV2Util.isRecoverableException(new ExecutionException(nonRecoverable))); + assertThat(AwsV2Util.isRecoverableException(new ExecutionException(nonRecoverable))) + .isFalse(); } @Test public void testIsRecoverableExceptionForRuntimeExceptionWrappingRecoverable() { Exception recoverable = LimitExceededException.builder().build(); Exception runtime = new RuntimeException("abc", recoverable); - assertTrue(AwsV2Util.isRecoverableException(runtime)); + assertThat(AwsV2Util.isRecoverableException(runtime)).isTrue(); } @Test public void testIsRecoverableExceptionForRuntimeExceptionWrappingNonRecoverable() { Exception nonRecoverable = new IllegalArgumentException("abc"); Exception runtime = new RuntimeException("abc", nonRecoverable); - assertFalse(AwsV2Util.isRecoverableException(runtime)); + assertThat(AwsV2Util.isRecoverableException(runtime)).isFalse(); } @Test public void testIsRecoverableExceptionForNullCause() { Exception nonRecoverable = new IllegalArgumentException("abc"); - assertFalse(AwsV2Util.isRecoverableException(nonRecoverable)); + assertThat(AwsV2Util.isRecoverableException(nonRecoverable)).isFalse(); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/JobManagerWatermarkTrackerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/JobManagerWatermarkTrackerTest.java index 26a6af2e444fd..3f1daebdfcf3d 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/JobManagerWatermarkTrackerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/JobManagerWatermarkTrackerTest.java @@ -24,10 +24,11 @@ import org.apache.flink.streaming.api.functions.sink.SinkFunction; import org.apache.flink.streaming.api.functions.source.RichSourceFunction; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; +import static org.assertj.core.api.Assertions.assertThat; + /** Test for {@link JobManagerWatermarkTracker}. 
*/ public class JobManagerWatermarkTrackerTest { @@ -40,7 +41,7 @@ public class JobManagerWatermarkTrackerTest { .build()); @Test - public void testUpateWatermark() throws Exception { + public void testUpdateWatermark() throws Exception { final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.addSource(new TestSourceFunction(new JobManagerWatermarkTracker("fakeId"))) @@ -64,8 +65,8 @@ public void open(Configuration parameters) throws Exception { @Override public void run(SourceContext ctx) { - Assert.assertEquals(998, tracker.updateWatermark(998)); - Assert.assertEquals(999, tracker.updateWatermark(999)); + assertThat(tracker.updateWatermark(998)).isEqualTo(998); + assertThat(tracker.updateWatermark(999)).isEqualTo(999); } @Override diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java index fa26cfb6565f6..5bbeebdb54b68 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java @@ -41,8 +41,8 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.EFO_HTTP_CLIENT_MAX_CONCURRENCY; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_TIMESTAMP_DATE_FORMAT; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; /** Tests for KinesisConfigUtil. 
*/ @RunWith(PowerMockRunner.class) @@ -74,12 +74,12 @@ public void testRateLimitInProducerConfiguration() { KinesisProducerConfiguration kpc = KinesisConfigUtil.getValidatedProducerConfiguration(testConfig); - assertEquals(100, kpc.getRateLimit()); + assertThat(kpc.getRateLimit()).isEqualTo(100); testConfig.setProperty(KinesisConfigUtil.RATE_LIMIT, "150"); kpc = KinesisConfigUtil.getValidatedProducerConfiguration(testConfig); - assertEquals(150, kpc.getRateLimit()); + assertThat(kpc.getRateLimit()).isEqualTo(150); } @Test @@ -89,13 +89,14 @@ public void testThreadingModelInProducerConfiguration() { KinesisProducerConfiguration kpc = KinesisConfigUtil.getValidatedProducerConfiguration(testConfig); - assertEquals(KinesisProducerConfiguration.ThreadingModel.POOLED, kpc.getThreadingModel()); + assertThat(kpc.getThreadingModel()) + .isEqualTo(KinesisProducerConfiguration.ThreadingModel.POOLED); testConfig.setProperty(KinesisConfigUtil.THREADING_MODEL, "PER_REQUEST"); kpc = KinesisConfigUtil.getValidatedProducerConfiguration(testConfig); - assertEquals( - KinesisProducerConfiguration.ThreadingModel.PER_REQUEST, kpc.getThreadingModel()); + assertThat(kpc.getThreadingModel()) + .isEqualTo(KinesisProducerConfiguration.ThreadingModel.PER_REQUEST); } @Test @@ -105,12 +106,12 @@ public void testThreadPoolSizeInProducerConfiguration() { KinesisProducerConfiguration kpc = KinesisConfigUtil.getValidatedProducerConfiguration(testConfig); - assertEquals(10, kpc.getThreadPoolSize()); + assertThat(kpc.getThreadPoolSize()).isEqualTo(10); testConfig.setProperty(KinesisConfigUtil.THREAD_POOL_SIZE, "12"); kpc = KinesisConfigUtil.getValidatedProducerConfiguration(testConfig); - assertEquals(12, kpc.getThreadPoolSize()); + assertThat(kpc.getThreadPoolSize()).isEqualTo(12); } @Test @@ -122,8 +123,10 @@ public void testReplaceDeprecatedKeys() { testConfig.setProperty(ProducerConfigConstants.COLLECTION_MAX_COUNT, "2"); Properties replacedConfig = KinesisConfigUtil.replaceDeprecatedProducerKeys(testConfig); - assertEquals("1", replacedConfig.getProperty(KinesisConfigUtil.AGGREGATION_MAX_COUNT)); - assertEquals("2", replacedConfig.getProperty(KinesisConfigUtil.COLLECTION_MAX_COUNT)); + assertThat(replacedConfig.getProperty(KinesisConfigUtil.AGGREGATION_MAX_COUNT)) + .isEqualTo("1"); + assertThat(replacedConfig.getProperty(KinesisConfigUtil.COLLECTION_MAX_COUNT)) + .isEqualTo("2"); } @Test @@ -134,7 +137,7 @@ public void testCorrectlySetRegionInProducerConfiguration() { KinesisProducerConfiguration kpc = KinesisConfigUtil.getValidatedProducerConfiguration(testConfig); - assertEquals("incorrect region", region, kpc.getRegion()); + assertThat(kpc.getRegion()).as("incorrect region").isEqualTo(region); } @Test @@ -207,7 +210,8 @@ public void testNoRecordPublisherTypeInConfig() { Properties testConfig = TestUtils.getStandardProperties(); ConsumerConfigConstants.RecordPublisherType recordPublisherType = KinesisConfigUtil.validateRecordPublisherType(testConfig); - assertEquals(recordPublisherType, ConsumerConfigConstants.RecordPublisherType.POLLING); + assertThat(ConsumerConfigConstants.RecordPublisherType.POLLING) + .isEqualTo(recordPublisherType); } @Test @@ -485,7 +489,7 @@ public void testDateStringForValidateOptionDateProperty() { KinesisConfigUtil.validateConsumerConfiguration(testConfig); } catch (Exception e) { e.printStackTrace(); - fail(); + fail("unknown failure"); } } @@ -502,7 +506,7 @@ public void testUnixTimestampForValidateOptionDateProperty() { KinesisConfigUtil.validateConsumerConfiguration(testConfig); } 
catch (Exception e) { e.printStackTrace(); - fail(); + fail("unknown failure"); } } @@ -743,7 +747,7 @@ public void testParseStreamTimestampStartingPositionUsingDefaultFormat() throws Date actualimestamp = KinesisConfigUtil.parseStreamTimestampStartingPosition(consumerProperties); - assertEquals(expectedTimestamp, actualimestamp); + assertThat(actualimestamp).isEqualTo(expectedTimestamp); } @Test @@ -759,7 +763,7 @@ public void testParseStreamTimestampStartingPositionUsingCustomFormat() throws E Date actualimestamp = KinesisConfigUtil.parseStreamTimestampStartingPosition(consumerProperties); - assertEquals(expectedTimestamp, actualimestamp); + assertThat(actualimestamp).isEqualTo(expectedTimestamp); } @Test diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java index 20a5a1aaaacb1..3a4c4f9507a6f 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java @@ -22,7 +22,6 @@ import org.apache.flink.testutils.executor.TestExecutorResource; import org.hamcrest.Matchers; -import org.junit.Assert; import org.junit.ClassRule; import org.junit.Test; @@ -33,6 +32,9 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.HamcrestCondition.matching; + /** Test for {@link RecordEmitter}. */ public class RecordEmitterTest { @@ -81,7 +83,7 @@ public void test() throws Exception { } emitter.stop(); - Assert.assertThat(emitter.results, Matchers.contains(one, five, two, ten)); + assertThat(emitter.results).satisfies(matching(Matchers.contains(one, five, two, ten))); } @Test @@ -120,7 +122,8 @@ public void testRetainMinAfterReachingLimit() throws Exception { while (emitter.results.size() != 4 && dl.hasTimeLeft()) { Thread.sleep(10); } - Assert.assertThat(emitter.results, Matchers.contains(one, two, three, ten)); + assertThat(emitter.results) + .satisfies(matching(Matchers.contains(one, two, three, ten))); // advance watermark, emits remaining record from queue0 emitter.setCurrentWatermark(10); @@ -128,7 +131,8 @@ public void testRetainMinAfterReachingLimit() throws Exception { while (emitter.results.size() != 5 && dl.hasTimeLeft()) { Thread.sleep(10); } - Assert.assertThat(emitter.results, Matchers.contains(one, two, three, ten, eleven)); + assertThat(emitter.results) + .satisfies(matching(Matchers.contains(one, two, three, ten, eleven))); } finally { emitter.stop(); } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java index a3dd47d946145..47c59f2f61cc4 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/StreamConsumerRegistrarUtilTest.java @@ -31,7 +31,7 @@ import static 
org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RECORD_PUBLISHER_TYPE; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.RecordPublisherType.EFO; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.efoConsumerArn; -import static org.junit.Assert.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; @@ -52,8 +52,10 @@ public void testRegisterStreamConsumers() throws Exception { StreamConsumerRegistrarUtil.registerStreamConsumers( registrar, configProps, Arrays.asList("stream-1", "stream-2")); - assertEquals("stream-1-consumer-arn", configProps.getProperty(efoConsumerArn("stream-1"))); - assertEquals("stream-2-consumer-arn", configProps.getProperty(efoConsumerArn("stream-2"))); + assertThat(configProps.getProperty(efoConsumerArn("stream-1"))) + .isEqualTo("stream-1-consumer-arn"); + assertThat(configProps.getProperty(efoConsumerArn("stream-2"))) + .isEqualTo("stream-2-consumer-arn"); } @Test diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/UniformShardAssignerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/UniformShardAssignerTest.java index bbaa0c6e0e1f2..81ed850dcd969 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/UniformShardAssignerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/UniformShardAssignerTest.java @@ -22,7 +22,6 @@ import com.amazonaws.services.kinesis.model.HashKeyRange; import com.amazonaws.services.kinesis.model.Shard; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -31,6 +30,8 @@ import java.math.BigInteger; import java.util.stream.Stream; +import static org.assertj.core.api.Assertions.assertThat; + /** Tests for the {@link UniformShardAssigner}. 
*/ @ExtendWith(TestLoggerExtension.class) public class UniformShardAssignerTest { @@ -68,8 +69,7 @@ public void testAssignment( StreamShardHandle handle = new StreamShardHandle("", shard); // streamName = "" hashes to zero - Assertions.assertEquals( - expectedSubtask, - Math.abs(new UniformShardAssigner().assign(handle, nSubtasks)) % nSubtasks); + assertThat(Math.abs(new UniformShardAssigner().assign(handle, nSubtasks)) % nSubtasks) + .isEqualTo(expectedSubtask); } } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/WatermarkTrackerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/WatermarkTrackerTest.java index fd98d920b12a3..4a71d2c52b125 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/WatermarkTrackerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/WatermarkTrackerTest.java @@ -20,12 +20,13 @@ import org.apache.flink.streaming.util.MockStreamingRuntimeContext; import org.apache.commons.lang3.mutable.MutableLong; -import org.junit.Assert; import org.junit.Test; import java.util.HashMap; import java.util.Map; +import static org.assertj.core.api.Assertions.assertThat; + /** Test for {@link WatermarkTracker}. */ public class WatermarkTrackerTest { @@ -89,17 +90,17 @@ public void test() { long watermark = 0; TestWatermarkTracker ws = new TestWatermarkTracker(); ws.open(new MockStreamingRuntimeContext(false, 1, 0)); - Assert.assertEquals(Long.MIN_VALUE, ws.updateWatermark(Long.MIN_VALUE)); - Assert.assertEquals(Long.MIN_VALUE, ws.updateWatermark(watermark)); + assertThat(ws.updateWatermark(Long.MIN_VALUE)).isEqualTo(Long.MIN_VALUE); + assertThat(ws.updateWatermark(watermark)).isEqualTo(Long.MIN_VALUE); // timeout wm1 clock.add(WatermarkTracker.DEFAULT_UPDATE_TIMEOUT_MILLIS + 1); - Assert.assertEquals(watermark, ws.updateWatermark(watermark)); - Assert.assertEquals(watermark, ws.updateWatermark(watermark - 1)); + assertThat(ws.updateWatermark(watermark)).isEqualTo(watermark); + assertThat(ws.updateWatermark(watermark - 1)).isEqualTo(watermark); // min watermark wm1.watermark = watermark + 1; wm1.lastUpdated = clock.longValue(); - Assert.assertEquals(watermark, ws.updateWatermark(watermark)); - Assert.assertEquals(watermark + 1, ws.updateWatermark(watermark + 1)); + assertThat(ws.updateWatermark(watermark)).isEqualTo(watermark); + assertThat(ws.updateWatermark(watermark + 1)).isEqualTo(watermark + 1); } } From 0541bc17b807599665899212f35e221d31be1ef4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Wed, 11 May 2022 10:26:21 +0200 Subject: [PATCH 08/16] Address connector-base feedback --- .../HybridSourceSplitEnumeratorTest.java | 9 +- .../base/source/hybrid/HybridSourceTest.java | 20 +++-- .../source/reader/SourceMetricsITCase.java | 78 +++++----------- .../reader/fetcher/SplitFetcherTest.java | 4 +- .../flink-test-utils/pom.xml | 6 ++ .../metrics/testutils/MetricAssertions.java | 89 +++++++++++++++++++ .../metrics/testutils/MetricMatchers.java | 85 ------------------ .../streaming/runtime/SinkMetricsITCase.java | 35 +++----- 8 files changed, 146 insertions(+), 180 deletions(-) create mode 100644 flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricAssertions.java delete mode 100644 
flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricMatchers.java diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java index ae8e5ef85a14f..242f0f6bb35ca 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceSplitEnumeratorTest.java @@ -38,7 +38,7 @@ import java.util.List; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for {@link HybridSourceSplitEnumerator}. */ public class HybridSourceSplitEnumeratorTest { @@ -152,11 +152,8 @@ public void testHandleSplitRequestAfterSwitchAndReaderReset() { // handleSplitRequest invalid during reset enumerator.addSplitsBack(Collections.singletonList(splitFromSource0), SUBTASK0); - try { - enumerator.handleSplitRequest(SUBTASK0, "fakehostname"); - fail("expected exception"); - } catch (IllegalStateException ex) { - } + assertThatThrownBy(() -> enumerator.handleSplitRequest(SUBTASK0, "fakehostname")) + .isInstanceOf(IllegalStateException.class); } @Test diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java index c9145182fef7b..8cc8dd202b2a8 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/hybrid/HybridSourceTest.java @@ -30,7 +30,7 @@ import java.util.List; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for {@link HybridSource}. 
*/ public class HybridSourceTest { @@ -51,14 +51,16 @@ public void testBoundedness() { .build(); assertThat(source.getBoundedness()).isEqualTo(Boundedness.CONTINUOUS_UNBOUNDED); - try { - HybridSource.builder(new MockBaseSource(1, 1, Boundedness.CONTINUOUS_UNBOUNDED)) - .addSource(new MockBaseSource(1, 1, Boundedness.CONTINUOUS_UNBOUNDED)) - .build(); - fail("expected exception"); - } catch (IllegalArgumentException e) { - // boundedness check to fail - } + assertThatThrownBy( + () -> + HybridSource.builder( + new MockBaseSource( + 1, 1, Boundedness.CONTINUOUS_UNBOUNDED)) + .addSource( + new MockBaseSource( + 1, 1, Boundedness.CONTINUOUS_UNBOUNDED)) + .build()) + .isInstanceOf(IllegalArgumentException.class); } @Test diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java index 3847ffc98dc52..191c383ad1317 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/SourceMetricsITCase.java @@ -44,7 +44,6 @@ import org.apache.flink.testutils.junit.SharedReference; import org.apache.flink.util.TestLogger; -import org.hamcrest.Matcher; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -54,14 +53,9 @@ import java.util.Map; import java.util.concurrent.CyclicBarrier; -import static org.apache.flink.metrics.testutils.MetricMatchers.isCounter; -import static org.apache.flink.metrics.testutils.MetricMatchers.isGauge; +import static org.apache.flink.metrics.testutils.MetricAssertions.assertThatCounter; +import static org.apache.flink.metrics.testutils.MetricAssertions.assertThatGauge; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.Matchers.both; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThan; /** Tests whether all provided metrics of a {@link Source} are of the expected values (FLIP-33). 
*/ public class SourceMetricsITCase extends TestLogger { @@ -176,39 +170,29 @@ private void assertSourceMetrics( // there are only 2 splits assigned; so two groups will not update metrics if (group.getIOMetricGroup().getNumRecordsInCounter().getCount() == 0) { // assert that optional metrics are not initialized when no split assigned - assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) - .satisfies( - matching( - isGauge( - equalTo( - InternalSourceReaderMetricGroup - .UNDEFINED)))); + assertThatGauge(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) + .isEqualTo(InternalSourceReaderMetricGroup.UNDEFINED); assertThat(metrics.get(MetricNames.WATERMARK_LAG)).isNull(); continue; } subtaskWithMetrics++; // I/O metrics - assertThat(group.getIOMetricGroup().getNumRecordsInCounter()) - .satisfies(matching(isCounter(equalTo(processedRecordsPerSubtask)))); - assertThat(group.getIOMetricGroup().getNumBytesInCounter()) - .satisfies( - matching( - isCounter( - equalTo( - processedRecordsPerSubtask - * MockRecordEmitter - .RECORD_SIZE_IN_BYTES)))); + + assertThatCounter(group.getIOMetricGroup().getNumRecordsInCounter()) + .isEqualTo(processedRecordsPerSubtask); + assertThatCounter(group.getIOMetricGroup().getNumBytesInCounter()) + .isEqualTo(processedRecordsPerSubtask * MockRecordEmitter.RECORD_SIZE_IN_BYTES); // MockRecordEmitter is just incrementing errors every even record - assertThat(metrics.get(MetricNames.NUM_RECORDS_IN_ERRORS)) - .satisfies(matching(isCounter(equalTo(processedRecordsPerSubtask / 2)))); + assertThatCounter(metrics.get(MetricNames.NUM_RECORDS_IN_ERRORS)) + .isEqualTo(processedRecordsPerSubtask / 2); if (hasTimestamps) { // Timestamp assigner subtracting EVENTTIME_LAG from wall clock - assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) - .satisfies(matching(isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)))); + assertThatGauge(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) + .isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON); // Watermark is derived from timestamp, so it has to be in the same order of // magnitude - assertThat(metrics.get(MetricNames.WATERMARK_LAG)) - .satisfies(matching(isGauge(isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON)))); + assertThatGauge(metrics.get(MetricNames.WATERMARK_LAG)) + .isCloseTo(EVENTTIME_LAG, EVENTTIME_EPSILON); // Calculate the additional watermark lag (on top of event time lag) Long watermarkLag = ((Gauge) metrics.get(MetricNames.WATERMARK_LAG)).getValue() @@ -218,41 +202,25 @@ private void assertSourceMetrics( .getValue(); // That should correspond to the out-of-order boundedness assertThat(watermarkLag) - .satisfies(matching(isCloseTo(WATERMARK_LAG, WATERMARK_EPSILON))); + .isGreaterThan(WATERMARK_LAG - WATERMARK_EPSILON) + .isLessThan(WATERMARK_LAG + WATERMARK_EPSILON); } else { // assert that optional metrics are not initialized when no timestamp assigned - assertThat(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) - .satisfies( - matching( - isGauge( - equalTo( - InternalSourceReaderMetricGroup - .UNDEFINED)))); + assertThatGauge(metrics.get(MetricNames.CURRENT_EMIT_EVENT_TIME_LAG)) + .isEqualTo(InternalSourceReaderMetricGroup.UNDEFINED); assertThat(metrics.get(MetricNames.WATERMARK_LAG)).isNull(); } long pendingRecords = numTotalPerSubtask - processedRecordsPerSubtask; - assertThat(metrics.get(MetricNames.PENDING_RECORDS)) - .satisfies(matching(isGauge(equalTo(pendingRecords)))); - assertThat(metrics.get(MetricNames.PENDING_BYTES)) - .satisfies( - matching( - isGauge( - equalTo( - pendingRecords - 
* MockRecordEmitter - .RECORD_SIZE_IN_BYTES)))); + assertThatGauge(metrics.get(MetricNames.PENDING_RECORDS)).isEqualTo(pendingRecords); + assertThatGauge(metrics.get(MetricNames.PENDING_BYTES)) + .isEqualTo(pendingRecords * MockRecordEmitter.RECORD_SIZE_IN_BYTES); // test is keeping source idle time metric busy with the barrier - assertThat(metrics.get(MetricNames.SOURCE_IDLE_TIME)) - .satisfies(matching(isGauge(equalTo(0L)))); + assertThatGauge(metrics.get(MetricNames.SOURCE_IDLE_TIME)).isEqualTo(0L); } assertThat(subtaskWithMetrics).isEqualTo(numSplits); } - private Matcher isCloseTo(long value, long epsilon) { - return both(greaterThan(value - epsilon)).and(lessThan(value + epsilon)); - } - private static class LaggingTimestampAssigner implements SerializableTimestampAssigner { private final long baseTime; diff --git a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java index 00f119662492a..89c4865b4b6f3 100644 --- a/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java +++ b/flink-connectors/flink-connector-base/src/test/java/org/apache/flink/connector/base/source/reader/fetcher/SplitFetcherTest.java @@ -235,8 +235,8 @@ public void run() { } assertThat(recordsRead).hasSize(numTotalRecords); - assertThat((int) recordsRead.first()).isEqualTo(0); - assertThat((int) recordsRead.last()).isEqualTo(numTotalRecords - 1); + assertThat(recordsRead.first()).isEqualTo(0); + assertThat(recordsRead.last()).isEqualTo(numTotalRecords - 1); assertThat(wakeupTimes.get()).isGreaterThan(0); } finally { stop.set(true); diff --git a/flink-test-utils-parent/flink-test-utils/pom.xml b/flink-test-utils-parent/flink-test-utils/pom.xml index 915b1f243becc..b16df155738fd 100644 --- a/flink-test-utils-parent/flink-test-utils/pom.xml +++ b/flink-test-utils-parent/flink-test-utils/pom.xml @@ -99,6 +99,12 @@ under the License. compile + + org.assertj + assertj-core + compile + + org.apache.curator curator-test diff --git a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricAssertions.java b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricAssertions.java new file mode 100644 index 0000000000000..200c8bc94da77 --- /dev/null +++ b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricAssertions.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.metrics.testutils; + +import org.apache.flink.metrics.Counter; +import org.apache.flink.metrics.Gauge; +import org.apache.flink.metrics.Metric; + +import org.assertj.core.api.AbstractAssert; + +import static org.assertj.core.api.Assertions.assertThat; + +/** This class provides access to AssertJ assertions for {@link Metric}s. */ +public class MetricAssertions { + + public static CounterAssert assertThatCounter(Metric actual) { + assertThat(actual).isInstanceOf(Counter.class); + return new CounterAssert((Counter) actual); + } + + public static GaugeAssert assertThatGauge(Metric actual) { + assertThat(actual).isInstanceOf(Gauge.class); + return new GaugeAssert<>((Gauge) actual); + } + + /** This class provides AssertJ assertions for {@link Gauge}s. */ + public static class GaugeAssert extends AbstractAssert, Gauge> { + + GaugeAssert(Gauge actual) { + super(actual, GaugeAssert.class); + } + + /** + * Verifies that the gauges value is equal to the expected one. + * + * @param expected the given value to compare the actual value to. + * @return this assertion object + */ + @Override + public GaugeAssert isEqualTo(Object expected) { + assertThat(actual.getValue()).isEqualTo(expected); + return this; + } + + /** + * Verifies that the gauges value is close to the expected value within a certain deviation. + * + * @param value the expected value + * @param epsilon the maximum deviation from the expected value + * @return this assertion object + */ + public GaugeAssert isCloseTo(long value, long epsilon) { + assertThat((Long) actual.getValue()) + .isGreaterThan(value - epsilon) + .isLessThan(value + epsilon); + return this; + } + } + + /** This class provides AssertJ assertions for {@link Counter}s. */ + public static class CounterAssert extends AbstractAssert { + + CounterAssert(Counter actual) { + super(actual, CounterAssert.class); + } + + @Override + public CounterAssert isEqualTo(Object expected) { + assertThat(actual.getCount()).isEqualTo(expected); + return this; + } + } +} diff --git a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricMatchers.java b/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricMatchers.java deleted file mode 100644 index 8710b6821648f..0000000000000 --- a/flink-test-utils-parent/flink-test-utils/src/main/java/org/apache/flink/metrics/testutils/MetricMatchers.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.flink.metrics.testutils; - -import org.apache.flink.metrics.Counter; -import org.apache.flink.metrics.Gauge; -import org.apache.flink.metrics.Metric; - -import org.hamcrest.Description; -import org.hamcrest.DiagnosingMatcher; -import org.hamcrest.Matcher; -import org.hamcrest.core.IsInstanceOf; - -import java.util.function.Function; - -/** - * Provides hamcrest Matchers for metric assertions. - * - * @see org.apache.flink.metrics.reporter.TestReporter - */ -public enum MetricMatchers { - ; - - public static Matcher isGauge(Matcher valueMatcher) { - return new MetricMatcher<>(Gauge.class, Gauge::getValue, valueMatcher); - } - - public static Matcher isCounter(Matcher valueMatcher) { - return new MetricMatcher<>(Counter.class, Counter::getCount, valueMatcher); - } - - private static class MetricMatcher - extends DiagnosingMatcher { - private final Matcher typeMatcher; - private final Function valueExtractor; - private final Matcher valueMatcher; - private final Class expectedClass; - - MetricMatcher( - Class expectedClass, Function valueExtractor, Matcher valueMatcher) { - this.expectedClass = expectedClass; - this.typeMatcher = new IsInstanceOf(expectedClass); - this.valueExtractor = valueExtractor; - this.valueMatcher = valueMatcher; - } - - @Override - protected boolean matches(Object item, Description mismatchDescription) { - if (!typeMatcher.matches(item)) { - typeMatcher.describeMismatch(item, mismatchDescription); - return false; - } - V value = valueExtractor.apply(expectedClass.cast(item)); - if (!valueMatcher.matches(value)) { - mismatchDescription.appendText(expectedClass.getSimpleName()).appendText(" with "); - valueMatcher.describeMismatch(value, mismatchDescription); - return false; - } - return true; - } - - @Override - public void describeTo(Description description) { - description - .appendText(expectedClass.getSimpleName()) - .appendText(" with ") - .appendDescriptionOf(valueMatcher); - } - } -} diff --git a/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SinkMetricsITCase.java b/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SinkMetricsITCase.java index 90b696dd8ebdf..754d42c1a11ee 100644 --- a/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SinkMetricsITCase.java +++ b/flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SinkMetricsITCase.java @@ -45,8 +45,8 @@ import java.util.concurrent.CyclicBarrier; import java.util.stream.LongStream; -import static org.apache.flink.metrics.testutils.MetricMatchers.isCounter; -import static org.apache.flink.metrics.testutils.MetricMatchers.isGauge; +import static org.apache.flink.metrics.testutils.MetricAssertions.assertThatCounter; +import static org.apache.flink.metrics.testutils.MetricAssertions.assertThatGauge; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.hasSize; @@ -140,29 +140,18 @@ private void assertSinkMetrics( } subtaskWithMetrics++; // SinkWriterMetricGroup metrics - assertThat( - metrics.get(MetricNames.NUM_RECORDS_SEND), - isCounter(equalTo(processedRecordsPerSubtask))); - assertThat( - metrics.get(MetricNames.NUM_BYTES_SEND), - isCounter( - equalTo( - processedRecordsPerSubtask - * MetricWriter.RECORD_SIZE_IN_BYTES))); + assertThatCounter(metrics.get(MetricNames.NUM_RECORDS_SEND)) + .isEqualTo(processedRecordsPerSubtask); + assertThatCounter(metrics.get(MetricNames.NUM_BYTES_SEND)) + .isEqualTo(processedRecordsPerSubtask * 
MetricWriter.RECORD_SIZE_IN_BYTES); // MetricWriter is just incrementing errors every even record - assertThat( - metrics.get(MetricNames.NUM_RECORDS_OUT_ERRORS), - isCounter(equalTo((processedRecordsPerSubtask + 1) / 2))); - assertThat( - metrics.get(MetricNames.NUM_RECORDS_SEND_ERRORS), - isCounter(equalTo((processedRecordsPerSubtask + 1) / 2))); + assertThatCounter(metrics.get(MetricNames.NUM_RECORDS_OUT_ERRORS)) + .isEqualTo((processedRecordsPerSubtask + 1) / 2); + assertThatCounter(metrics.get(MetricNames.NUM_RECORDS_SEND_ERRORS)) + .isEqualTo((processedRecordsPerSubtask + 1) / 2); // check if the latest send time is fetched - assertThat( - metrics.get(MetricNames.CURRENT_SEND_TIME), - isGauge( - equalTo( - (processedRecordsPerSubtask - 1) - * MetricWriter.BASE_SEND_TIME))); + assertThatGauge(metrics.get(MetricNames.CURRENT_SEND_TIME)) + .isEqualTo((processedRecordsPerSubtask - 1) * MetricWriter.BASE_SEND_TIME); } assertThat(subtaskWithMetrics, equalTo(numSplits)); } From 81a097ac93e6886243429e497f6710214eeecdcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 13 May 2022 15:01:13 +0200 Subject: [PATCH 09/16] Address connector-hive feedback --- .../hive/HiveDeserializeExceptionTest.java | 10 ++--- .../connectors/hive/HiveDialectITCase.java | 39 ++++++++----------- .../hive/HiveDynamicTableFactoryTest.java | 34 ++++++---------- .../connectors/hive/HiveRunnerITCase.java | 34 +++++++--------- .../connectors/hive/HiveTableSinkITCase.java | 30 ++++++++------ .../functions/hive/HiveGenericUDFTest.java | 6 +-- 6 files changed, 65 insertions(+), 88 deletions(-) diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java index bcea45b159493..12560e80ede06 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDeserializeExceptionTest.java @@ -41,7 +41,7 @@ import java.util.Collections; import java.util.Properties; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.Assume.assumeTrue; /** @@ -108,11 +108,7 @@ public void test() throws Exception { ClassLoader parentLoader = object.getClass().getClassLoader().getParent(); assumeTrue(parentLoader != null); byte[] bytes = InstantiationUtil.serializeObject(object); - try { - InstantiationUtil.deserializeObject(bytes, parentLoader); - fail("Exception not thrown"); - } catch (ClassNotFoundException e) { - // expected - } + assertThatThrownBy(() -> InstantiationUtil.deserializeObject(bytes, parentLoader)) + .isInstanceOf(ClassNotFoundException.class); } } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java index b92c6e7600558..9ca1dcc9eb8cc 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDialectITCase.java @@ -77,7 +77,7 @@ import java.util.List; import static 
org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Test Hive syntax when Hive dialect is used. */ public class HiveDialectITCase { @@ -743,18 +743,18 @@ public void testShowPartitions() throws Exception { "show partitions tbl partition (dt='2020-05-01',country='japan')") .collect()); assertThat(partitions).isEmpty(); - try { - CollectionUtil.iteratorToList( - tableEnv.executeSql( - "show partitions tbl partition (de='2020-04-30',city='china')") - .collect()); - } catch (TableException e) { - assertThat(e) - .hasMessage( - String.format( - "Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)", - hiveCatalog.getName(), tablePath)); - } + + assertThatThrownBy( + () -> + CollectionUtil.iteratorToList( + tableEnv.executeSql( + "show partitions tbl partition (de='2020-04-30',city='china')") + .collect())) + .isInstanceOf(TableException.class) + .hasMessage( + String.format( + "Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)", + hiveCatalog.getName(), tablePath)); tableEnv.executeSql( "alter table tbl drop partition (dt='2020-04-30',country='china'),partition (dt='2020-04-30',country='us')"); @@ -817,15 +817,10 @@ public void testUnsupportedOperation() { } private void verifyUnsupportedOperation(String ddl) { - try { - tableEnv.executeSql(ddl); - fail("We don't support " + ddl); - } catch (ValidationException e) { - // expected - assertThat(e.getCause()) - .as("Expect UnsupportedOperationException for " + ddl) - .isInstanceOf(UnsupportedOperationException.class); - } + assertThatThrownBy(() -> tableEnv.executeSql(ddl)) + .isInstanceOf(ValidationException.class) + .getCause() + .isInstanceOf(UnsupportedOperationException.class); } private static String locationPath(String locationURI) throws URISyntaxException { diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java index 03f386c375b00..2703044a140b7 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveDynamicTableFactoryTest.java @@ -33,7 +33,6 @@ import org.apache.flink.table.factories.DynamicTableSinkFactory; import org.apache.flink.table.factories.DynamicTableSourceFactory; import org.apache.flink.table.factories.FactoryUtil; -import org.apache.flink.util.ExceptionUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; @@ -53,6 +52,7 @@ import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_PARTITION_INCLUDE; import static org.apache.flink.connectors.hive.HiveOptions.STREAMING_SOURCE_PARTITION_ORDER; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Unit tests for {@link HiveDynamicTableFactory}. 
*/ public class HiveDynamicTableFactoryTest { @@ -95,16 +95,10 @@ public void testHiveStreamingSourceOptions() throws Exception { STREAMING_SOURCE_ENABLE.key(), STREAMING_SOURCE_PARTITION_INCLUDE.key())); DynamicTableSource tableSource2 = getTableSource("table2"); assertThat(tableSource2).isInstanceOf(HiveLookupTableSource.class); - try { - tableEnv.executeSql("select * from table2"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "The only supported 'streaming-source.partition.include' is 'all' in" - + " hive table scan, but is 'latest'")) - .isPresent(); - } + assertThatThrownBy(() -> tableEnv.executeSql("select * from table2")) + .hasMessage( + "The only supported 'streaming-source.partition.include' is 'all' in" + + " hive table scan, but is 'latest'"); // test table support 'partition-name' in option 'streaming-source.partition.order'. tableEnv.executeSql( @@ -216,7 +210,7 @@ public void testHiveLookupSourceOptions() throws Exception { } @Test - public void testInvalidOptions() throws Exception { + public void testInvalidOptions() { tableEnv.executeSql( String.format( "create table table9 (x int, y string, z int)" @@ -225,17 +219,11 @@ public void testInvalidOptions() throws Exception { STREAMING_SOURCE_PARTITION_INCLUDE.key(), STREAMING_SOURCE_MONITOR_INTERVAL.key(), STREAMING_SOURCE_CONSUME_START_OFFSET.key())); - - try { - getTableSource("table9"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "The 'streaming-source.consume-start-offset' is not supported when " - + "set 'streaming-source.partition.include' to 'latest'")) - .isPresent(); - } + assertThatThrownBy(() -> getTableSource("table9")) + .getRootCause() + .hasMessage( + "The 'streaming-source.consume-start-offset' is not supported when " + + "set 'streaming-source.partition.include' to 'latest'"); } @Test diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java index 69a292a6efa3c..03138093cafd0 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveRunnerITCase.java @@ -64,6 +64,7 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TXN_MANAGER; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** * Tests that need to run with hive runner. 
Since hive runner is heavy, make sure to add test cases @@ -570,25 +571,20 @@ private void testTransactionalTable(boolean batch) { tableEnv.executeSql("create table db1.src (x string,y string)"); hiveShell.execute( "create table db1.dest (x string,y string) clustered by (x) into 3 buckets stored as orc tblproperties ('transactional'='true')"); - List exceptions = new ArrayList<>(); - try { - tableEnv.executeSql("insert into db1.src select * from db1.dest").await(); - } catch (Exception e) { - exceptions.add(e); - } - try { - tableEnv.executeSql("insert into db1.dest select * from db1.src").await(); - } catch (Exception e) { - exceptions.add(e); - } - assertThat(exceptions).hasSize(2); - exceptions.forEach( - e -> { - assertThat(e).isInstanceOf(FlinkHiveException.class); - assertThat(e.getMessage()) - .isEqualTo( - "Reading or writing ACID table db1.dest is not supported."); - }); + assertThatThrownBy( + () -> + tableEnv.executeSql( + "insert into db1.src select * from db1.dest") + .await()) + .isInstanceOf(FlinkHiveException.class) + .hasMessage("Reading or writing ACID table db1.dest is not supported."); + assertThatThrownBy( + () -> + tableEnv.executeSql( + "insert into db1.dest select * from db1.src") + .await()) + .isInstanceOf(FlinkHiveException.class) + .hasMessage("Reading or writing ACID table db1.dest is not supported."); } finally { tableEnv.executeSql("drop database db1 cascade"); } diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java index e5dc44ca25f7e..fda56a08e6221 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java @@ -39,7 +39,6 @@ import org.apache.flink.types.Row; import org.apache.flink.util.CloseableIterator; import org.apache.flink.util.CollectionUtil; -import org.apache.flink.util.ExceptionUtils; import org.apache.flink.shaded.guava30.com.google.common.collect.Lists; @@ -72,6 +71,7 @@ import static org.apache.flink.table.planner.utils.TableTestUtil.replaceStageId; import static org.apache.flink.table.planner.utils.TableTestUtil.replaceStreamNodeId; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.Assertions.fail; /** Tests {@link HiveTableSink}. 
*/ @@ -427,17 +427,23 @@ public void testStreamingSinkWithoutCommitPolicy() throws Exception { public void testCustomPartitionCommitPolicyNotFound() { String customCommitPolicyClassName = "NotExistPartitionCommitPolicyClass"; - try { - testStreamingWriteWithCustomPartitionCommitPolicy(customCommitPolicyClassName); - fail("ExecutionException expected"); - } catch (Exception e) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - e, - "Can not create new instance for custom class from " - + customCommitPolicyClassName)) - .isPresent(); - } + assertThatThrownBy( + () -> + testStreamingWriteWithCustomPartitionCommitPolicy( + customCommitPolicyClassName)) + .satisfies( + (Consumer) + throwable -> { + List messages = new ArrayList<>(); + while (throwable != null) { + messages.add(throwable.getMessage()); + throwable = throwable.getCause(); + } + assertThat(messages) + .contains( + "Can not create new instance for custom class from " + + customCommitPolicyClassName); + }); } @Test diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java index 5cca17f793639..cabc275dee561 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/table/functions/hive/HiveGenericUDFTest.java @@ -45,14 +45,11 @@ import java.math.BigDecimal; import java.sql.Date; import java.sql.Timestamp; -import java.util.Arrays; import java.util.HashMap; import static org.apache.flink.table.HiveVersionTestUtil.HIVE_230_OR_LATER; import static org.apache.flink.table.HiveVersionTestUtil.HIVE_310_OR_LATER; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.Matchers.containsInAnyOrder; /** Test for {@link HiveGenericUDF}. 
*/ public class HiveGenericUDFTest { @@ -256,8 +253,7 @@ public void testMap() { Object[] result = (Object[]) udf2.eval(udf.eval(testInput)); - assertThat(result).hasSize(3); - assertThat(Arrays.asList(result)).satisfies(matching(containsInAnyOrder("1", "2", "3"))); + assertThat(result).hasSize(3).containsExactlyInAnyOrder("1", "2", "3"); } @Test From fffd289bbeca72097c59404612023ee4b4aa6787 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Fri, 13 May 2022 15:07:46 +0200 Subject: [PATCH 10/16] Address connector-cassandra feedback --- .../connectors/cassandra/CassandraConnectorITCase.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java index 57fc14c50d8a2..563c5c446d8fe 100644 --- a/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java +++ b/flink-connectors/flink-connector-cassandra/src/test/java/org/apache/flink/streaming/connectors/cassandra/CassandraConnectorITCase.java @@ -60,6 +60,7 @@ import com.datastax.driver.mapping.Mapper; import com.datastax.driver.mapping.annotations.Table; import net.bytebuddy.ByteBuddy; +import org.assertj.core.api.recursive.comparison.RecursiveComparisonConfiguration; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.junit.AfterClass; @@ -93,8 +94,6 @@ import scala.collection.Seq; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.Matchers.samePropertyValuesAs; /** IT cases for all cassandra sinks. 
*/ @SuppressWarnings("serial") @@ -725,7 +724,12 @@ public void testCassandraBatchPojoFormat() throws Exception { final List result = readPojosWithInputFormat(annotatedPojoClass); assertThat(result).hasSize(20); - assertThat(result).satisfies(matching(samePropertyValuesAs(pojos))); + assertThat(result) + .usingRecursiveComparison( + RecursiveComparisonConfiguration.builder() + .withIgnoreCollectionOrder(true) + .build()) + .isEqualTo(pojos); } @Test From d277994af144dbc235cae5bafbefb491c1404b56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Wed, 18 May 2022 10:51:22 +0200 Subject: [PATCH 11/16] Address connector-hive feedback 2 --- .../connectors/hive/HiveTableSinkITCase.java | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java index fda56a08e6221..26adb22180d31 100644 --- a/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java +++ b/flink-connectors/flink-connector-hive/src/test/java/org/apache/flink/connectors/hive/HiveTableSinkITCase.java @@ -431,19 +431,9 @@ public void testCustomPartitionCommitPolicyNotFound() { () -> testStreamingWriteWithCustomPartitionCommitPolicy( customCommitPolicyClassName)) - .satisfies( - (Consumer) - throwable -> { - List messages = new ArrayList<>(); - while (throwable != null) { - messages.add(throwable.getMessage()); - throwable = throwable.getCause(); - } - assertThat(messages) - .contains( - "Can not create new instance for custom class from " - + customCommitPolicyClassName); - }); + .hasStackTraceContaining( + "Can not create new instance for custom class from " + + customCommitPolicyClassName); } @Test From a54ec8472a087f0c2d77844b06ac755977531ecf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Wed, 18 May 2022 11:59:25 +0200 Subject: [PATCH 12/16] Fix connector-jdbc assert.fail --- .../connector/jdbc/JdbcDataTypeTest.java | 16 +- .../jdbc/JdbcRowOutputFormatTest.java | 25 ++- .../table/JdbcDynamicTableFactoryTest.java | 187 +++++++----------- .../jdbc/xa/JdbcXaSinkDerbyTest.java | 10 +- 4 files changed, 86 insertions(+), 152 deletions(-) diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java index 6dc47d27c5fb7..71726ae7d4aef 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcDataTypeTest.java @@ -19,7 +19,6 @@ package org.apache.flink.connector.jdbc; import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; -import org.apache.flink.table.api.ValidationException; import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; import org.junit.Test; @@ -31,8 +30,9 @@ import java.util.Arrays; import java.util.List; +import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static 
org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for all DataTypes and Dialects of JDBC connector. */ @RunWith(Parameterized.class) @@ -190,16 +190,8 @@ public void testDataTypeValidate() { tEnv.executeSql(sqlDDL); if (testItem.expectError != null) { - try { - tEnv.sqlQuery("SELECT * FROM T"); - fail("unknown failure"); - } catch (ValidationException ex) { - assertThat(ex.getCause()).hasMessage(testItem.expectError); - } catch (UnsupportedOperationException ex) { - assertThat(ex).hasMessage(testItem.expectError); - } catch (Exception e) { - fail("unknown failure", e); - } + assertThatThrownBy(() -> tEnv.sqlQuery("SELECT * FROM T")) + .satisfies(anyCauseMatches(testItem.expectError)); } else { tEnv.sqlQuery("SELECT * FROM T"); } diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java index 13142b93b58f7..b0d80db937d29 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/JdbcRowOutputFormatTest.java @@ -44,10 +44,11 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.SELECT_ALL_NEWBOOKS_3; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; import static org.apache.flink.connector.jdbc.JdbcTestFixture.TestEntry; +import static org.apache.flink.core.testutils.FlinkAssertions.anyCauseMatches; import static org.apache.flink.util.ExceptionUtils.findThrowable; import static org.apache.flink.util.ExceptionUtils.findThrowableWithMessage; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Tests for the {@link JdbcRowOutputFormat}. 
*/ public class JdbcRowOutputFormatTest extends JdbcDataTestBase { @@ -82,19 +83,15 @@ public void testInvalidDriver() { @Test public void testInvalidURL() { String expectedMsg = "No suitable driver found for jdbc:der:iamanerror:mory:ebookshop"; - try { - jdbcOutputFormat = - JdbcRowOutputFormat.buildJdbcOutputFormat() - .setDrivername(DERBY_EBOOKSHOP_DB.getDriverClass()) - .setDBUrl("jdbc:der:iamanerror:mory:ebookshop") - .setQuery(String.format(INSERT_TEMPLATE, INPUT_TABLE)) - .finish(); - jdbcOutputFormat.open(0, 1); - fail("expect exception"); - } catch (Exception e) { - assertThat(findThrowable(e, IOException.class)).isPresent(); - assertThat(findThrowableWithMessage(e, expectedMsg)).isPresent(); - } + + jdbcOutputFormat = + JdbcRowOutputFormat.buildJdbcOutputFormat() + .setDrivername(DERBY_EBOOKSHOP_DB.getDriverClass()) + .setDBUrl("jdbc:der:iamanerror:mory:ebookshop") + .setQuery(String.format(INSERT_TEMPLATE, INPUT_TABLE)) + .finish(); + assertThatThrownBy(() -> jdbcOutputFormat.open(0, 1)) + .satisfies(anyCauseMatches(IOException.class, expectedMsg)); } @Test diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java index 6140dacb8bac9..e25b65f97a33d 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/table/JdbcDynamicTableFactoryTest.java @@ -29,7 +29,6 @@ import org.apache.flink.table.catalog.UniqueConstraint; import org.apache.flink.table.connector.sink.DynamicTableSink; import org.apache.flink.table.connector.source.DynamicTableSource; -import org.apache.flink.util.ExceptionUtils; import org.junit.Test; @@ -41,7 +40,7 @@ import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSink; import static org.apache.flink.table.factories.utils.FactoryMocks.createTableSource; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** * Test for {@link JdbcDynamicTableSource} and {@link JdbcDynamicTableSink} created by {@link @@ -256,136 +255,88 @@ public void testJDBCSinkWithParallelism() { @Test public void testJdbcValidation() { // only password, no username - try { - Map properties = getAllOptions(); - properties.put("password", "pass"); - - createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "Either all or none of the following options should be provided:\n" - + "username\npassword")) - .isPresent(); - } + Map properties = getAllOptions(); + properties.put("password", "pass"); + + Map finalProperties = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties)) + .hasStackTraceContaining( + "Either all or none of the following options should be provided:\n" + + "username\npassword"); // read partition properties not complete - try { - Map properties = getAllOptions(); - properties.put("scan.partition.column", "aaa"); - properties.put("scan.partition.lower-bound", "-10"); - properties.put("scan.partition.upper-bound", "100"); - - createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - 
ExceptionUtils.findThrowableWithMessage( - t, - "Either all or none of the following options should be provided:\n" - + "scan.partition.column\n" - + "scan.partition.num\n" - + "scan.partition.lower-bound\n" - + "scan.partition.upper-bound")) - .isPresent(); - } + properties = getAllOptions(); + properties.put("scan.partition.column", "aaa"); + properties.put("scan.partition.lower-bound", "-10"); + properties.put("scan.partition.upper-bound", "100"); + + Map finalProperties1 = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties1)) + .hasStackTraceContaining( + "Either all or none of the following options should be provided:\n" + + "scan.partition.column\n" + + "scan.partition.num\n" + + "scan.partition.lower-bound\n" + + "scan.partition.upper-bound"); // read partition lower-bound > upper-bound - try { - Map properties = getAllOptions(); - properties.put("scan.partition.column", "aaa"); - properties.put("scan.partition.lower-bound", "100"); - properties.put("scan.partition.upper-bound", "-10"); - properties.put("scan.partition.num", "10"); - - createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "'scan.partition.lower-bound'='100' must not be larger than " - + "'scan.partition.upper-bound'='-10'.")) - .isPresent(); - } + properties = getAllOptions(); + properties.put("scan.partition.column", "aaa"); + properties.put("scan.partition.lower-bound", "100"); + properties.put("scan.partition.upper-bound", "-10"); + properties.put("scan.partition.num", "10"); + + Map finalProperties2 = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties2)) + .hasStackTraceContaining( + "'scan.partition.lower-bound'='100' must not be larger than " + + "'scan.partition.upper-bound'='-10'."); // lookup cache properties not complete - try { - Map properties = getAllOptions(); - properties.put("lookup.cache.max-rows", "10"); - - createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "Either all or none of the following options should be provided:\n" - + "lookup.cache.max-rows\n" - + "lookup.cache.ttl")) - .isPresent(); - } + properties = getAllOptions(); + properties.put("lookup.cache.max-rows", "10"); + + Map finalProperties3 = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties3)) + .hasStackTraceContaining( + "Either all or none of the following options should be provided:\n" + + "lookup.cache.max-rows\n" + + "lookup.cache.ttl"); // lookup cache properties not complete - try { - Map properties = getAllOptions(); - properties.put("lookup.cache.ttl", "1s"); - - createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "Either all or none of the following options should be provided:\n" - + "lookup.cache.max-rows\n" - + "lookup.cache.ttl")) - .isPresent(); - } + properties = getAllOptions(); + properties.put("lookup.cache.ttl", "1s"); + + Map finalProperties4 = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties4)) + .hasStackTraceContaining( + "Either all or none of the following options should be provided:\n" + + "lookup.cache.max-rows\n" + + "lookup.cache.ttl"); // lookup retries shouldn't be negative - try { - Map properties = getAllOptions(); - properties.put("lookup.max-retries", "-1"); - 
createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "The value of 'lookup.max-retries' option shouldn't be negative, but is -1.")) - .isPresent(); - } + properties = getAllOptions(); + properties.put("lookup.max-retries", "-1"); + Map finalProperties5 = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties5)) + .hasStackTraceContaining( + "The value of 'lookup.max-retries' option shouldn't be negative, but is -1."); // sink retries shouldn't be negative - try { - Map properties = getAllOptions(); - properties.put("sink.max-retries", "-1"); - createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "The value of 'sink.max-retries' option shouldn't be negative, but is -1.")) - .isPresent(); - } + properties = getAllOptions(); + properties.put("sink.max-retries", "-1"); + Map finalProperties6 = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties6)) + .hasStackTraceContaining( + "The value of 'sink.max-retries' option shouldn't be negative, but is -1."); // connection.max-retry-timeout shouldn't be smaller than 1 second - try { - Map properties = getAllOptions(); - properties.put("connection.max-retry-timeout", "100ms"); - createTableSource(SCHEMA, properties); - fail("exception expected"); - } catch (Throwable t) { - assertThat( - ExceptionUtils.findThrowableWithMessage( - t, - "The value of 'connection.max-retry-timeout' option must be in second granularity and shouldn't be smaller than 1 second, but is 100ms.")) - .isPresent(); - } + properties = getAllOptions(); + properties.put("connection.max-retry-timeout", "100ms"); + Map finalProperties7 = properties; + assertThatThrownBy(() -> createTableSource(SCHEMA, finalProperties7)) + .hasStackTraceContaining( + "The value of 'connection.max-retry-timeout' option must be in second granularity and shouldn't be smaller than 1 second, but is 100ms."); } @Test diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java index b788565419409..39eef2cfc1274 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/xa/JdbcXaSinkDerbyTest.java @@ -25,7 +25,7 @@ import static org.apache.flink.connector.jdbc.JdbcTestFixture.TEST_DATA; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** * {@link JdbcXaSinkFunction} tests using Derby DB. 
Derby supports XA but doesn't use MVCC, so we @@ -159,13 +159,7 @@ public void testFailurePropagation() throws Exception { new TestXaSinkStateHandler()); sinkHelper.emit(TEST_DATA[0]); sinkHelper.emit(TEST_DATA[0]); // duplicate - try { - sinkHelper.snapshotState(0); - } catch (Exception e) { - // expected: flush or commit duplicated records - return; - } - fail("should propagate error from snapshotState"); + assertThatThrownBy(() -> sinkHelper.snapshotState(0)).isInstanceOf(Exception.class); } static EmbeddedXADataSource derbyXaDs() { From d1f48f4dbdf84e480d1740fbbef0503503bbc760 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Wed, 18 May 2022 14:30:50 +0200 Subject: [PATCH 13/16] Fix connector-kinesis assert.fail --- .../kinesis/FlinkKinesisConsumerTest.java | 5 +- .../kinesis/FlinkKinesisProducerTest.java | 47 +++---------------- .../internals/KinesisDataFetcherTest.java | 19 ++------ .../model/DynamoDBStreamsShardHandleTest.java | 27 ++++++----- .../kinesis/proxy/KinesisProxyTest.java | 12 ++--- .../kinesis/util/KinesisConfigUtilTest.java | 18 ++----- 6 files changed, 36 insertions(+), 92 deletions(-) diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java index c817ef1b98e67..403ff340ef1e5 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java @@ -91,7 +91,6 @@ import java.util.function.Supplier; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; import static org.assertj.core.api.HamcrestCondition.matching; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; @@ -1206,9 +1205,7 @@ private void awaitRecordCount(ConcurrentLinkedQueue queue, int } int received = queue.size(); - if (received < count) { - fail(String.format("Timeout waiting for records, received %d/%d", received, count)); - } + assertThat(received).isEqualTo(count); } private static class OpenCheckingStringSchema extends SimpleStringSchema { diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java index 3f7ad267ad45d..8d41c86bacc89 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisProducerTest.java @@ -31,7 +31,6 @@ import org.apache.flink.streaming.util.AbstractStreamOperatorTestHarness; import org.apache.flink.streaming.util.MockSerializationSchema; import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness; -import org.apache.flink.util.ExceptionUtils; import org.apache.flink.util.InstantiationUtil; import org.apache.flink.util.TestLogger; @@ -52,7 +51,7 @@ import java.util.Properties; import static org.assertj.core.api.Assertions.assertThat; -import static 
org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; @@ -132,18 +131,8 @@ public void testAsyncErrorRethrownOnInvoke() throws Throwable { .get(0) .setException(new Exception("artificial async exception")); - try { - testHarness.processElement(new StreamRecord<>("msg-2")); - } catch (Exception e) { - // the next invoke should rethrow the async exception - assertThat(ExceptionUtils.findThrowableWithMessage(e, "artificial async exception")) - .isPresent(); - - // test succeeded - return; - } - - fail("unknown failure"); + assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>("msg-2"))) + .hasStackTraceContaining("artificial async exception"); } /** @@ -167,18 +156,8 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable { .get(0) .setException(new Exception("artificial async exception")); - try { - testHarness.snapshot(123L, 123L); - } catch (Exception e) { - // the next checkpoint should rethrow the async exception - assertThat(ExceptionUtils.findThrowableWithMessage(e, "artificial async exception")) - .isPresent(); - - // test succeeded - return; - } - - fail("unknown failure"); + assertThatThrownBy(() -> testHarness.snapshot(123L, 123L)) + .hasStackTraceContaining("artificial async exception"); } /** @@ -226,20 +205,8 @@ public void go() throws Exception { .setException(new Exception("artificial async failure for 2nd message")); producer.getPendingRecordFutures().get(2).set(mock(UserRecordResult.class)); - try { - snapshotThread.sync(); - } catch (Exception e) { - // after the flush, the async exception should have been rethrown - assertThat( - ExceptionUtils.findThrowableWithMessage( - e, "artificial async failure for 2nd message")) - .isPresent(); - - // test succeeded - return; - } - - fail("unknown failure"); + assertThatThrownBy(snapshotThread::sync) + .hasStackTraceContaining("artificial async failure for 2nd message"); } /** diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java index 6130d18cb7460..02b41a81b8403 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/internals/KinesisDataFetcherTest.java @@ -73,7 +73,7 @@ import static java.util.Collections.singletonList; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.SHARD_DISCOVERY_INTERVAL_MILLIS; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -1004,20 +1004,9 @@ public void go() throws Exception { // InterruptedException. consumerThread.interrupt(); - try { - consumerThread.sync(); - } catch (InterruptedException e) { - fail( - "Expected exception from deserializer, but got InterruptedException, probably from " - + "KinesisDataFetcher, which obscures the cause of the failure. 
" - + e); - } catch (RuntimeException e) { - if (!e.getMessage().equals(AlwaysThrowsDeserializationSchema.EXCEPTION_MESSAGE)) { - fail("Expected exception from deserializer, but got: " + e); - } - } catch (Exception e) { - fail("Expected exception from deserializer, but got: " + e); - } + assertThatThrownBy(consumerThread::sync) + .isInstanceOf(RuntimeException.class) + .hasMessage(AlwaysThrowsDeserializationSchema.EXCEPTION_MESSAGE); assertThat(fetcher.wasInterrupted) .as( diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java index 275b1847963be..61dd1b4b4d6be 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/model/DynamoDBStreamsShardHandleTest.java @@ -21,7 +21,7 @@ import static org.apache.flink.streaming.connectors.kinesis.model.DynamoDBStreamsShardHandle.SHARDID_PREFIX; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; /** Shard handle unit tests. */ public class DynamoDBStreamsShardHandleTest { @@ -64,18 +64,19 @@ public void testCompareShardId() { .isEqualTo(0); // comparison of invalid shardIds should yield exception - try { - DynamoDBStreamsShardHandle.compareShardIds(shardIdValid, shardIdInvalid); - fail("invalid shard Id" + shardIdInvalid + " should trigger exception"); - } catch (IllegalArgumentException e) { - // expected - } - try { - DynamoDBStreamsShardHandle.compareShardIds(shardIdInvalid, shardIdValid); - fail("invalid shard Id" + shardIdInvalid + " should trigger exception"); - } catch (IllegalArgumentException e) { - // expected - } + assertThatThrownBy( + () -> + DynamoDBStreamsShardHandle.compareShardIds( + shardIdValid, shardIdInvalid)) + .as("invalid shard Id" + shardIdInvalid + " should trigger exception") + .isInstanceOf(IllegalArgumentException.class); + + assertThatThrownBy( + () -> + DynamoDBStreamsShardHandle.compareShardIds( + shardIdInvalid, shardIdValid)) + .as("invalid shard Id" + shardIdInvalid + " should trigger exception") + .isInstanceOf(IllegalArgumentException.class); // compare randomly generated shardIds based on timestamp String[] shardIds = new String[numShardIds]; diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java index 3f2e40f40417f..0541c00d8b477 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java @@ -62,7 +62,7 @@ import java.util.stream.IntStream; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.assertj.core.api.HamcrestCondition.matching; import static 
org.hamcrest.collection.IsCollectionWithSize.hasSize; import static org.hamcrest.collection.IsIterableContainingInAnyOrder.containsInAnyOrder; @@ -392,12 +392,10 @@ public ListShardsResult answer(InvocationOnMock invocation) ConsumerConfigConstants.LIST_SHARDS_RETRIES, String.valueOf(maxRetries)); kinesisProxy = new KinesisProxy(kinesisConsumerConfig); Whitebox.getField(KinesisProxy.class, "kinesisClient").set(kinesisProxy, mockClient); - try { - kinesisProxy.getShardList(streamNames); - fail("exception expected"); - } catch (SdkClientException ex) { - assertThat(ex).isEqualTo(retriableExceptions[maxRetries]); - } + KinesisProxy finalKinesisProxy = kinesisProxy; + assertThatThrownBy(() -> finalKinesisProxy.getShardList(streamNames)) + .isInstanceOf(SdkClientException.class) + .isEqualTo(retriableExceptions[maxRetries]); assertThat(exceptionCount.intValue()).isEqualTo(maxRetries + 1); } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java index 5bbeebdb54b68..c9811d7481af5 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/KinesisConfigUtilTest.java @@ -42,7 +42,7 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP; import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_TIMESTAMP_DATE_FORMAT; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.fail; +import static org.assertj.core.api.Assertions.assertThatNoException; /** Tests for KinesisConfigUtil. 
*/ @RunWith(PowerMockRunner.class) @@ -485,12 +485,8 @@ public void testDateStringForValidateOptionDateProperty() { testConfig.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "AT_TIMESTAMP"); testConfig.setProperty(ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP, timestamp); - try { - KinesisConfigUtil.validateConsumerConfiguration(testConfig); - } catch (Exception e) { - e.printStackTrace(); - fail("unknown failure"); - } + assertThatNoException() + .isThrownBy(() -> KinesisConfigUtil.validateConsumerConfiguration(testConfig)); } @Test @@ -502,12 +498,8 @@ public void testUnixTimestampForValidateOptionDateProperty() { testConfig.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, "AT_TIMESTAMP"); testConfig.setProperty(ConsumerConfigConstants.STREAM_INITIAL_TIMESTAMP, unixTimestamp); - try { - KinesisConfigUtil.validateConsumerConfiguration(testConfig); - } catch (Exception e) { - e.printStackTrace(); - fail("unknown failure"); - } + assertThatNoException() + .isThrownBy(() -> KinesisConfigUtil.validateConsumerConfiguration(testConfig)); } @Test From c7d4f56cab57849631cfcb4637a606d11522fc4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Thu, 19 May 2022 10:41:08 +0200 Subject: [PATCH 14/16] Address connector-jdbc feedback 2 --- .../jdbc/split/NumericBetweenParametersProviderTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java index 4698c34f42ca3..cb1ebf3f9185e 100644 --- a/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java +++ b/flink-connectors/flink-connector-jdbc/src/test/java/org/apache/flink/connector/jdbc/split/NumericBetweenParametersProviderTest.java @@ -116,9 +116,9 @@ public void testBatchNumTooLarge() { } private void check(long[][] expected, Serializable[][] actual) { - assertThat(actual).hasDimensions(expected.length, 2); + assertThat(actual).hasDimensions(expected.length, expected[0].length); for (int i = 0; i < expected.length; i++) { - for (int j = 0; j < 2; j++) { + for (int j = 0; j < expected[i].length; j++) { assertThat(((Long) actual[i][j]).longValue()).isEqualTo(expected[i][j]); } } From 0802f8e8e0ad92795637ad50e6f05d25e9da938c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Thu, 19 May 2022 11:02:53 +0200 Subject: [PATCH 15/16] Address connector-kafka feedback --- .../connector/kafka/sink/KafkaSinkITCase.java | 56 ++++++------------- .../kafka/sink/KafkaTransactionLogITCase.java | 34 +++++------ .../source/reader/KafkaSourceReaderTest.java | 12 +--- 3 files changed, 33 insertions(+), 69 deletions(-) diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java index d7f59c91b578e..be8180ee39812 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaSinkITCase.java @@ -111,8 +111,6 @@ import static 
org.apache.flink.util.DockerImageVersions.KAFKA; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.fail; -import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.Matchers.contains; /** Tests for using KafkaSink writing to a Kafka cluster. */ public class KafkaSinkITCase extends TestLogger { @@ -230,17 +228,10 @@ public void testRecoveryWithExactlyOnceGuarantee() throws Exception { 1, (records) -> assertThat(records) - .satisfies( - matching( - contains( - LongStream.range( - 1, - lastCheckpointedRecord - .get() - .get() - + 1) - .boxed() - .toArray())))); + .contains( + (LongStream.range(1, lastCheckpointedRecord.get().get() + 1) + .boxed() + .toArray(Long[]::new)))); } @Test @@ -250,17 +241,10 @@ public void testRecoveryWithExactlyOnceGuaranteeAndConcurrentCheckpoints() throw 2, (records) -> assertThat(records) - .satisfies( - matching( - contains( - LongStream.range( - 1, - lastCheckpointedRecord - .get() - .get() - + 1) - .boxed() - .toArray())))); + .contains( + LongStream.range(1, lastCheckpointedRecord.get().get() + 1) + .boxed() + .toArray(Long[]::new))); } @Test @@ -294,12 +278,10 @@ public void testAbortTransactionsOfPendingCheckpointsAfterFailure() throws Excep final List> collectedRecords = drainAllRecordsFromTopic(topic, true); assertThat(deserializeValues(collectedRecords)) - .satisfies( - matching( - contains( - LongStream.range(1, lastCheckpointedRecord.get().get() + 1) - .boxed() - .toArray()))); + .contains( + LongStream.range(1, lastCheckpointedRecord.get().get() + 1) + .boxed() + .toArray(Long[]::new)); } @Test @@ -323,12 +305,10 @@ public void testAbortTransactionsAfterScaleInBeforeFirstCheckpoint() throws Exce final List> collectedRecords = drainAllRecordsFromTopic(topic, true); assertThat(deserializeValues(collectedRecords)) - .satisfies( - matching( - contains( - LongStream.range(1, lastCheckpointedRecord.get().get() + 1) - .boxed() - .toArray()))); + .contains( + LongStream.range(1, lastCheckpointedRecord.get().get() + 1) + .boxed() + .toArray(Long[]::new)); } private void executeWithMapper( @@ -418,9 +398,7 @@ private void writeRecordsToKafka( final long recordsCount = expectedRecords.get().get(); assertThat(recordsCount).isEqualTo(collectedRecords.size()); assertThat(deserializeValues(collectedRecords)) - .satisfies( - matching( - contains(LongStream.range(1, recordsCount + 1).boxed().toArray()))); + .contains(LongStream.range(1, recordsCount + 1).boxed().toArray(Long[]::new)); checkProducerLeak(); } diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java index 7b0884e494fed..84d732dbf5967 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/sink/KafkaTransactionLogITCase.java @@ -47,8 +47,6 @@ import static org.apache.flink.connector.kafka.testutils.KafkaUtil.createKafkaContainer; import static org.apache.flink.util.DockerImageVersions.KAFKA; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.Matchers.containsInAnyOrder; /** Tests for {@link KafkaTransactionLog} to retrieve abortable Kafka transactions. 
*/ public class KafkaTransactionLogITCase extends TestLogger { @@ -79,25 +77,19 @@ public void testGetTransactions() { new KafkaTransactionLog(getKafkaClientConfiguration()); final List transactions = transactionLog.getTransactions(); assertThat(transactions) - .satisfies( - matching( - containsInAnyOrder( - new TransactionRecord(buildTransactionalId(1), Empty), - new TransactionRecord(buildTransactionalId(1), Ongoing), - new TransactionRecord( - buildTransactionalId(1), PrepareCommit), - new TransactionRecord( - buildTransactionalId(1), CompleteCommit), - new TransactionRecord(buildTransactionalId(2), Empty), - new TransactionRecord(buildTransactionalId(2), Ongoing), - new TransactionRecord( - buildTransactionalId(2), PrepareAbort), - new TransactionRecord( - buildTransactionalId(2), CompleteAbort), - new TransactionRecord(buildTransactionalId(3), Empty), - new TransactionRecord(buildTransactionalId(3), Ongoing), - new TransactionRecord(buildTransactionalId(4), Empty), - new TransactionRecord(buildTransactionalId(4), Ongoing)))); + .containsExactlyInAnyOrder( + new TransactionRecord(buildTransactionalId(1), Empty), + new TransactionRecord(buildTransactionalId(1), Ongoing), + new TransactionRecord(buildTransactionalId(1), PrepareCommit), + new TransactionRecord(buildTransactionalId(1), CompleteCommit), + new TransactionRecord(buildTransactionalId(2), Empty), + new TransactionRecord(buildTransactionalId(2), Ongoing), + new TransactionRecord(buildTransactionalId(2), PrepareAbort), + new TransactionRecord(buildTransactionalId(2), CompleteAbort), + new TransactionRecord(buildTransactionalId(3), Empty), + new TransactionRecord(buildTransactionalId(3), Ongoing), + new TransactionRecord(buildTransactionalId(4), Empty), + new TransactionRecord(buildTransactionalId(4), Ongoing)); } private void committedTransaction(long id) { diff --git a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java index 9e6c1fc4faa17..89a06faaeb0b8 100644 --- a/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java +++ b/flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/connector/kafka/source/reader/KafkaSourceReaderTest.java @@ -50,7 +50,6 @@ import org.apache.kafka.common.serialization.IntegerDeserializer; import org.apache.kafka.common.serialization.IntegerSerializer; import org.apache.kafka.common.serialization.StringSerializer; -import org.hamcrest.Matchers; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -80,7 +79,6 @@ import static org.apache.flink.connector.kafka.testutils.KafkaSourceTestEnv.NUM_PARTITIONS; import static org.apache.flink.core.testutils.CommonTestUtils.waitUtil; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.HamcrestCondition.matching; /** Unit tests for {@link KafkaSourceReader}. 
*/ public class KafkaSourceReaderTest extends SourceReaderTestBase { @@ -394,13 +392,9 @@ void testAssigningEmptySplits() throws Exception { () -> reader.getNumAliveFetchers() == 0, "The split fetcher did not exit before timeout."); assertThat(finishedSplits) - .satisfies( - matching( - Matchers.containsInAnyOrder( - KafkaPartitionSplit.toSplitId( - normalSplit.getTopicPartition()), - KafkaPartitionSplit.toSplitId( - emptySplit.getTopicPartition())))); + .containsExactlyInAnyOrder( + KafkaPartitionSplit.toSplitId(normalSplit.getTopicPartition()), + KafkaPartitionSplit.toSplitId(emptySplit.getTopicPartition())); } } From e88e5721f7b6c1762026e25c8117054d7dc5b60c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Preu=C3=9F?= <11444089+alpreu@users.noreply.github.com> Date: Thu, 19 May 2022 11:19:08 +0200 Subject: [PATCH 16/16] Address connector-kinesis feedback --- .../kinesis/FlinkKinesisConsumerTest.java | 20 +++--------- .../kinesis/FlinkKinesisITCase.java | 5 +-- .../kinesis/proxy/KinesisProxyTest.java | 32 ++++++------------- .../kinesis/util/RecordEmitterTest.java | 10 ++---- 4 files changed, 19 insertions(+), 48 deletions(-) diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java index 403ff340ef1e5..65211ecff66e3 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisConsumerTest.java @@ -29,7 +29,6 @@ import org.apache.flink.api.java.tuple.Tuple2; import org.apache.flink.api.java.typeutils.runtime.PojoSerializer; import org.apache.flink.configuration.Configuration; -import org.apache.flink.metrics.MetricGroup; import org.apache.flink.mock.Whitebox; import org.apache.flink.runtime.state.FunctionSnapshotContext; import org.apache.flink.runtime.state.StateInitializationContext; @@ -92,7 +91,6 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -981,11 +979,7 @@ public void markAsTemporarilyIdle() {} testHarness.close(); assertThat(testHarness.getOutput()).as("record count").hasSize(recordCount); - assertThat(watermarks) - .satisfies( - matching( - org.hamcrest.Matchers.contains( - new Watermark(-3), new Watermark(5)))); + assertThat(watermarks).contains(new Watermark(-3), new Watermark(5)); assertThat(watermarks).as("watermark count").hasSize(watermarkCount); } @@ -1137,8 +1131,7 @@ public void emitWatermark(Watermark mark) { expectedResults.add(new Watermark(-4)); // verify watermark awaitRecordCount(results, expectedResults.size()); - assertThat(results) - 
.satisfies(matching(org.hamcrest.Matchers.contains(expectedResults.toArray()))); + assertThat(results).contains(expectedResults); TestWatermarkTracker.assertGlobalWatermark(3000); // Trigger watermark update and emit testHarness.setProcessingTime(testHarness.getProcessingTime() + autoWatermarkInterval); expectedResults.add(new Watermark(3000)); - assertThat(results) - .satisfies(matching(org.hamcrest.Matchers.contains(expectedResults.toArray()))); + assertThat(results).contains(expectedResults); // verify exception propagation assertThat(sourceThreadError.get()).isNull(); @@ -1213,8 +1204,7 @@ private static class OpenCheckingStringSchema extends SimpleStringSchema { @Override public void open(DeserializationSchema.InitializationContext context) throws Exception { - assertThat(context.getMetricGroup()) - .satisfies(matching(notNullValue(MetricGroup.class))); + assertThat(context.getMetricGroup()).isNotNull(); this.opened = true; } diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java index 9f0914e80721e..f02bb5b066e7b 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/FlinkKinesisITCase.java @@ -49,9 +49,6 @@ import static org.apache.flink.streaming.connectors.kinesis.config.ConsumerConfigConstants.STREAM_INITIAL_POSITION; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.lessThan; /** IT cases for using Kinesis consumer/producer based on Kinesalite. 
*/ @Ignore("See FLINK-23528") @@ -123,7 +120,7 @@ public void testStopWithSavepoint() throws Exception { List result = stream.executeAndCollect(10000); // stop with savepoint will most likely only return a small subset of the elements // validate that the prefix is as expected - assertThat(result).satisfies(matching(hasSize(lessThan(numElements)))); + assertThat(result).size().isLessThan(numElements); assertThat(result) .isEqualTo( IntStream.range(0, numElements) diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java index 0541c00d8b477..6c3c563b55c31 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/proxy/KinesisProxyTest.java @@ -63,9 +63,6 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; -import static org.assertj.core.api.HamcrestCondition.matching; -import static org.hamcrest.collection.IsCollectionWithSize.hasSize; -import static org.hamcrest.collection.IsIterableContainingInAnyOrder.containsInAnyOrder; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -197,7 +194,7 @@ public void testGetShardList() throws Exception { List actualShardList = shardListResult.getRetrievedShardListOfStream(fakeStreamName); List expectedStreamShard = new ArrayList<>(); - assertThat(actualShardList).satisfies(matching(hasSize(4))); + assertThat(actualShardList).hasSize(4); for (int i = 0; i < 4; i++) { StreamShardHandle shardHandle = new StreamShardHandle( @@ -209,11 +206,8 @@ public void testGetShardList() throws Exception { } assertThat(actualShardList) - .satisfies( - matching( - containsInAnyOrder( - expectedStreamShard.toArray( - new StreamShardHandle[actualShardList.size()])))); + .containsExactlyInAnyOrder( + expectedStreamShard.toArray(new StreamShardHandle[actualShardList.size()])); } @Test @@ -253,7 +247,7 @@ public void testGetShardListWithNewShardsOnSecondRun() throws Exception { List actualShardList = shardListResult.getRetrievedShardListOfStream(fakeStreamName); - assertThat(actualShardList).satisfies(matching(hasSize(2))); + assertThat(actualShardList).hasSize(2); List expectedStreamShard = IntStream.range(0, actualShardList.size()) @@ -269,11 +263,8 @@ public void testGetShardListWithNewShardsOnSecondRun() throws Exception { .collect(Collectors.toList()); assertThat(actualShardList) - .satisfies( - matching( - containsInAnyOrder( - expectedStreamShard.toArray( - new StreamShardHandle[actualShardList.size()])))); + .containsExactlyInAnyOrder( + expectedStreamShard.toArray(new StreamShardHandle[actualShardList.size()])); // given new shards ListShardsResult responseSecond = @@ -296,7 +287,7 @@ public void testGetShardListWithNewShardsOnSecondRun() throws Exception { List newActualShardList = newShardListResult.getRetrievedShardListOfStream(fakeStreamName); - assertThat(newActualShardList).satisfies(matching(hasSize(1))); + assertThat(newActualShardList).hasSize(1); List newExpectedStreamShard = Collections.singletonList( @@ -308,12 +299,9 @@ public void testGetShardListWithNewShardsOnSecondRun() throws Exception { 2)))); 
assertThat(newActualShardList) - .satisfies( - matching( - containsInAnyOrder( - newExpectedStreamShard.toArray( - new StreamShardHandle - [newActualShardList.size()])))); + .containsExactlyInAnyOrder( + newExpectedStreamShard.toArray( + new StreamShardHandle[newActualShardList.size()])); } @Test diff --git a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java index 3a4c4f9507a6f..8348cadb4c7b1 100644 --- a/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java +++ b/flink-connectors/flink-connector-kinesis/src/test/java/org/apache/flink/streaming/connectors/kinesis/util/RecordEmitterTest.java @@ -21,7 +21,6 @@ import org.apache.flink.streaming.runtime.operators.windowing.TimestampedValue; import org.apache.flink.testutils.executor.TestExecutorResource; -import org.hamcrest.Matchers; import org.junit.ClassRule; import org.junit.Test; @@ -33,7 +32,6 @@ import java.util.concurrent.Executors; import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.HamcrestCondition.matching; /** Test for {@link RecordEmitter}. */ public class RecordEmitterTest { @@ -83,7 +81,7 @@ public void test() throws Exception { } emitter.stop(); - assertThat(emitter.results).satisfies(matching(Matchers.contains(one, five, two, ten))); + assertThat(emitter.results).contains(one, five, two, ten); } @Test @@ -122,8 +120,7 @@ public void testRetainMinAfterReachingLimit() throws Exception { while (emitter.results.size() != 4 && dl.hasTimeLeft()) { Thread.sleep(10); } - assertThat(emitter.results) - .satisfies(matching(Matchers.contains(one, two, three, ten))); + assertThat(emitter.results).contains(one, two, three, ten); // advance watermark, emits remaining record from queue0 emitter.setCurrentWatermark(10); @@ -131,8 +128,7 @@ public void testRetainMinAfterReachingLimit() throws Exception { while (emitter.results.size() != 5 && dl.hasTimeLeft()) { Thread.sleep(10); } - assertThat(emitter.results) - .satisfies(matching(Matchers.contains(one, two, three, ten, eleven))); + assertThat(emitter.results).contains(one, two, three, ten, eleven); } finally { emitter.stop(); }