fields = dataSet.next().getFields();
+ final int regionId = fields.get(regionIdIndex).getIntV();
+ final String regionStatus = fields.get(regionStatusIndex).toString();
+ if (!"Running".equals(regionStatus)) {
+ result.putIfAbsent(regionId, regionStatus);
+ }
+ }
+ return result;
}
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java
index da6bf2dfdc6f1..23829ae31ee45 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateClusterCrashIT.java
@@ -19,7 +19,7 @@
package org.apache.iotdb.confignode.it.regionmigration.pass;
-import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
import org.apache.iotdb.confignode.procedure.state.AddRegionPeerState;
import org.apache.iotdb.confignode.procedure.state.RemoveRegionPeerState;
import org.apache.iotdb.it.framework.IoTDBTestRunner;
@@ -31,7 +31,7 @@
@Category({DailyIT.class})
@RunWith(IoTDBTestRunner.class)
-public class IoTDBRegionMigrateClusterCrashIT extends IoTDBRegionMigrateReliabilityITFramework {
+public class IoTDBRegionMigrateClusterCrashIT extends IoTDBRegionOperationReliabilityITFramework {
@Test
public void clusterCrash1() throws Exception {
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java
index f6916eff40282..72d8d129cfce6 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateConfigNodeCrashIT.java
@@ -21,7 +21,7 @@
import org.apache.iotdb.commons.utils.KillPoint.KillNode;
import org.apache.iotdb.commons.utils.KillPoint.KillPoint;
-import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
import org.apache.iotdb.confignode.procedure.state.AddRegionPeerState;
import org.apache.iotdb.confignode.procedure.state.RegionTransitionState;
import org.apache.iotdb.confignode.procedure.state.RemoveRegionPeerState;
@@ -39,7 +39,8 @@
@Category({DailyIT.class})
@RunWith(IoTDBTestRunner.class)
-public class IoTDBRegionMigrateConfigNodeCrashIT extends IoTDBRegionMigrateReliabilityITFramework {
+public class IoTDBRegionMigrateConfigNodeCrashIT
+ extends IoTDBRegionOperationReliabilityITFramework {
@Test
@Ignore
public void cnCrashDuringPreCheckTest() throws Exception {
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java
index f56296b9cfaea..e2d6a9db57ba2 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateNormalIT.java
@@ -20,7 +20,7 @@
package org.apache.iotdb.confignode.it.regionmigration.pass;
import org.apache.iotdb.commons.utils.KillPoint.KillNode;
-import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
import org.apache.iotdb.it.framework.IoTDBTestRunner;
import org.apache.iotdb.itbase.category.ClusterIT;
@@ -30,7 +30,7 @@
@Category({ClusterIT.class})
@RunWith(IoTDBTestRunner.class)
-public class IoTDBRegionMigrateNormalIT extends IoTDBRegionMigrateReliabilityITFramework {
+public class IoTDBRegionMigrateNormalIT extends IoTDBRegionOperationReliabilityITFramework {
@Test
public void normal1C2DTest() throws Exception {
successTest(1, 1, 1, 2, noKillPoints(), noKillPoints(), KillNode.ALL_NODES);
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java
index f4ca461edd8b6..b5b2f7759e739 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/IoTDBRegionMigrateOtherIT.java
@@ -21,7 +21,7 @@
import org.apache.iotdb.commons.utils.KillPoint.KillNode;
import org.apache.iotdb.commons.utils.KillPoint.NeverTriggeredKillPoint;
-import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
import org.apache.iotdb.it.framework.IoTDBTestRunner;
import org.apache.iotdb.itbase.category.ClusterIT;
@@ -32,7 +32,7 @@
@RunWith(IoTDBTestRunner.class)
@Category({ClusterIT.class})
-public class IoTDBRegionMigrateOtherIT extends IoTDBRegionMigrateReliabilityITFramework {
+public class IoTDBRegionMigrateOtherIT extends IoTDBRegionOperationReliabilityITFramework {
@Test
public void badKillPoint() throws Exception {
try {
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionGroupExpandAndShrinkForIoTV1IT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionGroupExpandAndShrinkForIoTV1IT.java
new file mode 100644
index 0000000000000..3c4aa16a401e4
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionGroupExpandAndShrinkForIoTV1IT.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.regionmigration.pass.commit;
+
+import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
+import org.apache.iotdb.confignode.rpc.thrift.TShowRegionResp;
+import org.apache.iotdb.consensus.ConsensusFactory;
+import org.apache.iotdb.it.env.EnvFactory;
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.ClusterIT;
+
+import org.awaitility.Awaitility;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
+
+import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly;
+
+@Category({ClusterIT.class})
+@RunWith(IoTDBTestRunner.class)
+public class IoTDBRegionGroupExpandAndShrinkForIoTV1IT
+ extends IoTDBRegionOperationReliabilityITFramework {
+ private static final String EXPAND_FORMAT = "extend region %d to %d";
+ private static final String SHRINK_FORMAT = "remove region %d from %d";
+
+ private static Logger LOGGER =
+ LoggerFactory.getLogger(IoTDBRegionGroupExpandAndShrinkForIoTV1IT.class);
+
+ /**
+ * 1. Expand: {a} -> {a,b} -> ... -> {a,b,c,d,e}
+ *
+ * <p>2. Check
+ *
+ * <p>3. Shrink: {a,b,c,d,e} -> {a,c,d,e} -> ... -> {d}
+ *
+ * <p>4. Check
+ */
+ @Test
+ public void normal1C5DTest() throws Exception {
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setDataReplicationFactor(1)
+ .setSchemaReplicationFactor(1);
+
+ EnvFactory.getEnv().initClusterEnvironment(1, 5);
+
+ try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection());
+ final Statement statement = makeItCloseQuietly(connection.createStatement());
+ SyncConfigNodeIServiceClient client =
+ (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) {
+ // prepare data
+ statement.execute(INSERTION1);
+ statement.execute(FLUSH_COMMAND);
+
+ // collect necessary information
+ Map<Integer, Set<Integer>> regionMap = getAllRegionMap(statement);
+ Set<Integer> allDataNodeId = getAllDataNodes(statement);
+
+ // expect one data region, one schema region
+ Assert.assertEquals(2, regionMap.size());
+
+ // expand
+ for (int selectedRegion : regionMap.keySet()) {
+ for (int i = 0; i < 4; i++) {
+ int targetDataNode =
+ selectDataNodeNotContainsRegion(allDataNodeId, regionMap, selectedRegion);
+ regionGroupExpand(statement, client, selectedRegion, targetDataNode);
+ // update regionMap every time
+ regionMap = getAllRegionMap(statement);
+ }
+ }
+
+ // shrink
+ for (int selectedRegion : regionMap.keySet()) {
+ for (int i = 0; i < 4; i++) {
+ int targetDataNode =
+ selectDataNodeContainsRegion(allDataNodeId, regionMap, selectedRegion);
+ regionGroupShrink(statement, client, selectedRegion, targetDataNode);
+ // update regionMap every time
+ regionMap = getAllRegionMap(statement);
+ }
+ }
+ }
+ }
+
+ private void regionGroupExpand(
+ Statement statement,
+ SyncConfigNodeIServiceClient client,
+ int selectedRegion,
+ int targetDataNode)
+ throws Exception {
+ Awaitility.await()
+ .atMost(10, TimeUnit.SECONDS)
+ .pollInterval(1, TimeUnit.SECONDS)
+ .until(
+ () -> {
+ statement.execute(String.format(EXPAND_FORMAT, selectedRegion, targetDataNode));
+ return true;
+ });
+
+ Predicate<TShowRegionResp> expandRegionPredicate =
+ tShowRegionResp -> {
+ Map<Integer, Set<Integer>> newRegionMap =
+ getRunningRegionMap(tShowRegionResp.getRegionInfoList());
+ Set<Integer> dataNodes = newRegionMap.get(selectedRegion);
+ return dataNodes.contains(targetDataNode);
+ };
+
+ awaitUntilSuccess(client, expandRegionPredicate, Optional.of(targetDataNode), Optional.empty());
+
+ LOGGER.info("Region {} has expanded to DataNode {}", selectedRegion, targetDataNode);
+ }
+
+ private void regionGroupShrink(
+ Statement statement,
+ SyncConfigNodeIServiceClient client,
+ int selectedRegion,
+ int targetDataNode)
+ throws Exception {
+ Awaitility.await()
+ .atMost(10, TimeUnit.SECONDS)
+ .pollInterval(1, TimeUnit.SECONDS)
+ .until(
+ () -> {
+ statement.execute(String.format(SHRINK_FORMAT, selectedRegion, targetDataNode));
+ return true;
+ });
+
+ Predicate<TShowRegionResp> shrinkRegionPredicate =
+ tShowRegionResp -> {
+ Map<Integer, Set<Integer>> newRegionMap =
+ getRegionMap(tShowRegionResp.getRegionInfoList());
+ Set<Integer> dataNodes = newRegionMap.get(selectedRegion);
+ return !dataNodes.contains(targetDataNode);
+ };
+
+ awaitUntilSuccess(client, shrinkRegionPredicate, Optional.empty(), Optional.of(targetDataNode));
+
+ LOGGER.info("Region {} has shrunk from DataNode {}", selectedRegion, targetDataNode);
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionReconstructForIoTV1IT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionReconstructForIoTV1IT.java
new file mode 100644
index 0000000000000..9c720bfbc2746
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/commit/IoTDBRegionReconstructForIoTV1IT.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.regionmigration.pass.commit;
+
+import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient;
+import org.apache.iotdb.commons.cluster.NodeStatus;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
+import org.apache.iotdb.consensus.ConsensusFactory;
+import org.apache.iotdb.isession.SessionDataSet;
+import org.apache.iotdb.it.env.EnvFactory;
+import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper;
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.ClusterIT;
+import org.apache.iotdb.rpc.StatementExecutionException;
+import org.apache.iotdb.session.Session;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.tsfile.read.common.RowRecord;
+import org.awaitility.Awaitility;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly;
+
+@Category({ClusterIT.class})
+@RunWith(IoTDBTestRunner.class)
+public class IoTDBRegionReconstructForIoTV1IT extends IoTDBRegionOperationReliabilityITFramework {
+ private static final String RECONSTRUCT_FORMAT = "reconstruct region %d on %d";
+ private static Logger LOGGER = LoggerFactory.getLogger(IoTDBRegionReconstructForIoTV1IT.class);
+
+ @Test
+ public void normal1C3DTest() throws Exception {
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setDataReplicationFactor(2)
+ .setSchemaReplicationFactor(3);
+
+ EnvFactory.getEnv().initClusterEnvironment(1, 3);
+
+ try (Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection());
+ Statement statement = makeItCloseQuietly(connection.createStatement());
+ SyncConfigNodeIServiceClient client =
+ (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) {
+ // prepare data
+ statement.execute(INSERTION1);
+ statement.execute(FLUSH_COMMAND);
+
+ // collect necessary information
+ Map<Integer, Set<Integer>> dataRegionMap = getDataRegionMap(statement);
+ Set<Integer> allDataNodeId = getAllDataNodes(statement);
+
+ // select datanode
+ final int selectedRegion = 1;
+ Assert.assertTrue(dataRegionMap.containsKey(selectedRegion));
+ Assert.assertEquals(2, dataRegionMap.get(selectedRegion).size());
+ Iterator<Integer> iterator = dataRegionMap.get(selectedRegion).iterator();
+ final int dataNodeToBeClosed = iterator.next();
+ final int dataNodeToBeReconstructed = iterator.next();
+ final int dataNodeAlwaysGood =
+ allDataNodeId.stream()
+ .filter(x -> x != dataNodeToBeReconstructed && x != dataNodeToBeClosed)
+ .findAny()
+ .get();
+ final DataNodeWrapper dataNodeWrapper =
+ EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeAlwaysGood).get();
+ Session session =
+ new Session.Builder()
+ .host(dataNodeWrapper.getIp())
+ .port(dataNodeWrapper.getPort())
+ .build();
+ session.open();
+
+ // delete one DataNode's data dir, stop another DataNode
+ File dataDirToBeReconstructed =
+ new File(
+ EnvFactory.getEnv()
+ .dataNodeIdToWrapper(dataNodeToBeReconstructed)
+ .get()
+ .getDataPath());
+ FileUtils.deleteDirectory(dataDirToBeReconstructed);
+ EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeToBeClosed).get().stopForcibly();
+
+ // now, the query should throw exception
+ Assert.assertThrows(
+ StatementExecutionException.class,
+ () -> session.executeQueryStatement("select * from root.**"));
+
+ // start DataNode, reconstruct the delete one
+ EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeToBeClosed).get().start();
+ EnvFactory.getAbstractEnv().checkNodeInStatus(dataNodeToBeClosed, NodeStatus.Running);
+ session.executeNonQueryStatement(
+ String.format(RECONSTRUCT_FORMAT, selectedRegion, dataNodeToBeReconstructed));
+ try {
+ Awaitility.await()
+ .pollInterval(1, TimeUnit.SECONDS)
+ .atMost(1, TimeUnit.MINUTES)
+ .until(
+ () ->
+ getRegionStatusWithoutRunning(session).isEmpty()
+ && dataDirToBeReconstructed.getAbsoluteFile().exists());
+ } catch (Exception e) {
+ LOGGER.error(
+ "Two factor: {} && {}",
+ getRegionStatusWithoutRunning(session).isEmpty(),
+ dataDirToBeReconstructed.getAbsoluteFile().exists());
+ Assert.fail();
+ }
+ EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeToBeClosed).get().stopForcibly();
+
+ // now, the query should work fine
+ SessionDataSet resultSet = session.executeQueryStatement("select * from root.**");
+ RowRecord rowRecord = resultSet.next();
+ Assert.assertEquals("2.0", rowRecord.getFields().get(0).getStringValue());
+ Assert.assertEquals("1.0", rowRecord.getFields().get(1).getStringValue());
+ }
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java
index 6819af9f87b3f..e4c99e22c1fa9 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateDataNodeCrashIT.java
@@ -21,7 +21,7 @@
import org.apache.iotdb.commons.utils.KillPoint.DataNodeKillPoints;
import org.apache.iotdb.commons.utils.KillPoint.KillNode;
-import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateReliabilityITFramework;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
import org.apache.iotdb.it.framework.IoTDBTestRunner;
import org.apache.iotdb.itbase.category.DailyIT;
@@ -31,7 +31,7 @@
@Category({DailyIT.class})
@RunWith(IoTDBTestRunner.class)
-public class IoTDBRegionMigrateDataNodeCrashIT extends IoTDBRegionMigrateReliabilityITFramework {
+public class IoTDBRegionMigrateDataNodeCrashIT extends IoTDBRegionOperationReliabilityITFramework {
// region Coordinator DataNode crash tests
private final int dataReplicateFactor = 2;
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java
index 047037c7d147e..2334bd6938513 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/regionmigration/pass/datanodecrash/IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT.java
@@ -20,7 +20,7 @@
package org.apache.iotdb.confignode.it.regionmigration.pass.datanodecrash;
import org.apache.iotdb.commons.utils.KillPoint.IoTConsensusDeleteLocalPeerKillPoints;
-import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionMigrateDataNodeCrashITFramework;
+import org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework;
import org.apache.iotdb.it.framework.IoTDBTestRunner;
import org.apache.iotdb.itbase.category.DailyIT;
@@ -28,10 +28,12 @@
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
+import static org.junit.platform.commons.function.Try.success;
+
@Category({DailyIT.class})
@RunWith(IoTDBTestRunner.class)
public class IoTDBRegionMigrateOriginalCrashWhenDeleteLocalPeerIT
- extends IoTDBRegionMigrateDataNodeCrashITFramework {
+ extends IoTDBRegionOperationReliabilityITFramework {
@Test
public void crashBeforeDelete() throws Exception {
success(IoTConsensusDeleteLocalPeerKillPoints.BEFORE_DELETE);
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeITFramework.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeITFramework.java
new file mode 100644
index 0000000000000..561e99ec3da39
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeITFramework.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.removeconfignode;
+
+import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient;
+import org.apache.iotdb.confignode.it.removedatanode.SQLModel;
+import org.apache.iotdb.consensus.ConsensusFactory;
+import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant;
+import org.apache.iotdb.it.env.EnvFactory;
+import org.apache.iotdb.itbase.exception.InconsistentDataException;
+import org.apache.iotdb.jdbc.IoTDBSQLException;
+
+import org.awaitility.Awaitility;
+import org.awaitility.core.ConditionTimeoutException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework.getDataRegionMap;
+import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly;
+
+public class IoTDBRemoveConfigNodeITFramework {
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(IoTDBRemoveConfigNodeITFramework.class);
+ private static final String TREE_MODEL_INSERTION =
+ "INSERT INTO root.sg.d1(timestamp,speed,temperature) values(100, 1, 2)";
+
+ private static final String SHOW_CONFIGNODES = "show confignodes";
+
+ private static final String defaultSchemaRegionGroupExtensionPolicy = "CUSTOM";
+ private static final String defaultDataRegionGroupExtensionPolicy = "CUSTOM";
+
+ @Before
+ public void setUp() throws Exception {
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setSchemaRegionGroupExtensionPolicy(defaultSchemaRegionGroupExtensionPolicy)
+ .setDataRegionGroupExtensionPolicy(defaultDataRegionGroupExtensionPolicy);
+ }
+
+ @After
+ public void tearDown() throws InterruptedException {
+ EnvFactory.getEnv().cleanClusterEnvironment();
+ }
+
+ public void testRemoveConfigNode(
+ final int dataReplicateFactor,
+ final int schemaReplicationFactor,
+ final int configNodeNum,
+ final int dataNodeNum,
+ final int dataRegionPerDataNode,
+ final SQLModel model)
+ throws Exception {
+
+ // Set up the environment
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setSchemaReplicationFactor(schemaReplicationFactor)
+ .setDataReplicationFactor(dataReplicateFactor)
+ .setDefaultDataRegionGroupNumPerDatabase(
+ dataRegionPerDataNode * dataNodeNum / dataReplicateFactor);
+ EnvFactory.getEnv().initClusterEnvironment(configNodeNum, dataNodeNum);
+
+ try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection());
+ final Statement statement = makeItCloseQuietly(connection.createStatement());
+ SyncConfigNodeIServiceClient client =
+ (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) {
+
+ // Insert data in tree model
+ statement.execute(TREE_MODEL_INSERTION);
+
+ Map<Integer, Set<Integer>> regionMap = getDataRegionMap(statement);
+ regionMap.forEach(
+ (key, valueSet) -> {
+ LOGGER.info("Key: {}, Value: {}", key, valueSet);
+ if (valueSet.size() != dataReplicateFactor) {
+ Assert.fail();
+ }
+ });
+
+ // Get all config nodes
+ ResultSet result = statement.executeQuery(SHOW_CONFIGNODES);
+ Set<Integer> allConfigNodeId = new HashSet<>();
+ while (result.next()) {
+ allConfigNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID));
+ }
+
+ AtomicReference<SyncConfigNodeIServiceClient> clientRef = new AtomicReference<>(client);
+
+ int removeConfigNodeId = allConfigNodeId.iterator().next();
+ String removeConfigNodeSQL = generateRemoveString(removeConfigNodeId);
+ LOGGER.info("Remove ConfigNodes SQL: {}", removeConfigNodeSQL);
+ try {
+ statement.execute(removeConfigNodeSQL);
+ } catch (IoTDBSQLException e) {
+ LOGGER.error("Remove ConfigNodes SQL execute fail: {}", e.getMessage());
+ Assert.fail();
+ }
+ LOGGER.info("Remove ConfigNodes SQL submit successfully.");
+
+ // Wait until success
+ try {
+ awaitUntilSuccess(statement, removeConfigNodeId);
+ } catch (ConditionTimeoutException e) {
+ LOGGER.error("Remove ConfigNodes timeout in 2 minutes");
+ Assert.fail();
+ }
+
+ LOGGER.info("Remove ConfigNodes success");
+ } catch (InconsistentDataException e) {
+ LOGGER.error("Unexpected error:", e);
+ }
+ }
+
+ private static void awaitUntilSuccess(Statement statement, int removeConfigNodeId) {
+ AtomicReference<Set<Integer>> lastTimeConfigNodes = new AtomicReference<>();
+ AtomicReference<Exception> lastException = new AtomicReference<>();
+
+ try {
+ Awaitility.await()
+ .atMost(2, TimeUnit.MINUTES)
+ .pollDelay(2, TimeUnit.SECONDS)
+ .until(
+ () -> {
+ try {
+ // Get all config nodes
+ ResultSet result = statement.executeQuery(SHOW_CONFIGNODES);
+ Set<Integer> allConfigNodeId = new HashSet<>();
+ while (result.next()) {
+ allConfigNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID));
+ }
+ lastTimeConfigNodes.set(allConfigNodeId);
+ return !allConfigNodeId.contains(removeConfigNodeId);
+ } catch (Exception e) {
+ // Any exception can be ignored
+ lastException.set(e);
+ return false;
+ }
+ });
+ } catch (ConditionTimeoutException e) {
+ if (lastTimeConfigNodes.get() == null) {
+ LOGGER.error(
+ "Maybe show confignodes fail, lastTimeConfigNodes is null, last Exception:",
+ lastException.get());
+ throw e;
+ }
+ String actualSetStr = lastTimeConfigNodes.get().toString();
+ lastTimeConfigNodes.get().remove(removeConfigNodeId);
+ String expectedSetStr = lastTimeConfigNodes.get().toString();
+ LOGGER.error(
+ "Remove ConfigNode timeout in 2 minutes, expected set: {}, actual set: {}",
+ expectedSetStr,
+ actualSetStr);
+ if (lastException.get() == null) {
+ LOGGER.info("No exception during awaiting");
+ } else {
+ LOGGER.error("Last exception during awaiting:", lastException.get());
+ }
+ throw e;
+ }
+ }
+
+ public static String generateRemoveString(Integer configNodeId) {
+ return "remove confignode " + configNodeId;
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeNormalIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeNormalIT.java
new file mode 100644
index 0000000000000..44a1100672e5e
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removeconfignode/IoTDBRemoveConfigNodeNormalIT.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.removeconfignode;
+
+import org.apache.iotdb.confignode.it.removedatanode.SQLModel;
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.ClusterIT;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+@Category({ClusterIT.class})
+@RunWith(IoTDBTestRunner.class)
+public class IoTDBRemoveConfigNodeNormalIT extends IoTDBRemoveConfigNodeITFramework {
+ @Test
+ public void test3C1DUseTreeSQL() throws Exception {
+ testRemoveConfigNode(1, 1, 3, 1, 2, SQLModel.TREE_MODEL_SQL);
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeITFramework.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeITFramework.java
new file mode 100644
index 0000000000000..73d5fd6fac353
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeITFramework.java
@@ -0,0 +1,402 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.removedatanode;
+
+import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration;
+import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
+import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient;
+import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRemoveReq;
+import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRemoveResp;
+import org.apache.iotdb.consensus.ConsensusFactory;
+import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant;
+import org.apache.iotdb.it.env.EnvFactory;
+import org.apache.iotdb.it.env.cluster.node.DataNodeWrapper;
+import org.apache.iotdb.itbase.exception.InconsistentDataException;
+import org.apache.iotdb.jdbc.IoTDBSQLException;
+import org.apache.iotdb.rpc.TSStatusCode;
+
+import org.apache.thrift.TException;
+import org.awaitility.Awaitility;
+import org.awaitility.core.ConditionTimeoutException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+import static org.apache.iotdb.confignode.it.regionmigration.IoTDBRegionOperationReliabilityITFramework.getDataRegionMap;
+import static org.apache.iotdb.util.MagicUtils.makeItCloseQuietly;
+
+public class IoTDBRemoveDataNodeITFramework {
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(IoTDBRemoveDataNodeITFramework.class);
+ private static final String TREE_MODEL_INSERTION =
+ "INSERT INTO root.sg.d1(timestamp,speed,temperature) values(100, 1, 2)";
+
+ private static final String SHOW_REGIONS = "show regions";
+ private static final String SHOW_DATANODES = "show datanodes";
+
+ private static final String defaultSchemaRegionGroupExtensionPolicy = "CUSTOM";
+ private static final String defaultDataRegionGroupExtensionPolicy = "CUSTOM";
+
+ @Before
+ public void setUp() throws Exception {
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setConfigNodeConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setSchemaRegionConsensusProtocolClass(ConsensusFactory.RATIS_CONSENSUS)
+ .setDataRegionConsensusProtocolClass(ConsensusFactory.IOT_CONSENSUS)
+ .setSchemaRegionGroupExtensionPolicy(defaultSchemaRegionGroupExtensionPolicy)
+ .setDataRegionGroupExtensionPolicy(defaultDataRegionGroupExtensionPolicy);
+ }
+
+ @After
+ public void tearDown() throws InterruptedException {
+ EnvFactory.getEnv().cleanClusterEnvironment();
+ }
+
+ public void successTest(
+ final int dataReplicateFactor,
+ final int schemaReplicationFactor,
+ final int configNodeNum,
+ final int dataNodeNum,
+ final int removeDataNodeNum,
+ final int dataRegionPerDataNode,
+ final boolean rejoinRemovedDataNode,
+ final SQLModel model)
+ throws Exception {
+ testRemoveDataNode(
+ dataReplicateFactor,
+ schemaReplicationFactor,
+ configNodeNum,
+ dataNodeNum,
+ removeDataNodeNum,
+ dataRegionPerDataNode,
+ true,
+ rejoinRemovedDataNode,
+ model);
+ }
+
+ public void failTest(
+ final int dataReplicateFactor,
+ final int schemaReplicationFactor,
+ final int configNodeNum,
+ final int dataNodeNum,
+ final int removeDataNodeNum,
+ final int dataRegionPerDataNode,
+ final boolean rejoinRemovedDataNode,
+ final SQLModel model)
+ throws Exception {
+ testRemoveDataNode(
+ dataReplicateFactor,
+ schemaReplicationFactor,
+ configNodeNum,
+ dataNodeNum,
+ removeDataNodeNum,
+ dataRegionPerDataNode,
+ false,
+ rejoinRemovedDataNode,
+ model);
+ }
+
+ public void testRemoveDataNode(
+ final int dataReplicateFactor,
+ final int schemaReplicationFactor,
+ final int configNodeNum,
+ final int dataNodeNum,
+ final int removeDataNodeNum,
+ final int dataRegionPerDataNode,
+ final boolean expectRemoveSuccess,
+ final boolean rejoinRemovedDataNode,
+ final SQLModel model)
+ throws Exception {
+ // Set up the environment
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setSchemaReplicationFactor(schemaReplicationFactor)
+ .setDataReplicationFactor(dataReplicateFactor)
+ .setDefaultDataRegionGroupNumPerDatabase(
+ dataRegionPerDataNode * dataNodeNum / dataReplicateFactor);
+ EnvFactory.getEnv().initClusterEnvironment(configNodeNum, dataNodeNum);
+
+ try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection());
+ final Statement statement = makeItCloseQuietly(connection.createStatement());
+ SyncConfigNodeIServiceClient client =
+ (SyncConfigNodeIServiceClient) EnvFactory.getEnv().getLeaderConfigNodeConnection()) {
+
+ // Insert data in tree model
+ statement.execute(TREE_MODEL_INSERTION);
+
+ Map> regionMap = getDataRegionMap(statement);
+ regionMap.forEach(
+ (key, valueSet) -> {
+ LOGGER.info("Key: {}, Value: {}", key, valueSet);
+ if (valueSet.size() != dataReplicateFactor) {
+ Assert.fail();
+ }
+ });
+
+ // Get all data nodes
+ ResultSet result = statement.executeQuery(SHOW_DATANODES);
+ Set allDataNodeId = new HashSet<>();
+ while (result.next()) {
+ allDataNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID));
+ }
+
+ // Select data nodes to remove
+ final Set removeDataNodes =
+ selectRemoveDataNodes(allDataNodeId, regionMap, removeDataNodeNum);
+
+ List removeDataNodeWrappers =
+ removeDataNodes.stream()
+ .map(dataNodeId -> EnvFactory.getEnv().dataNodeIdToWrapper(dataNodeId).get())
+ .collect(Collectors.toList());
+
+ AtomicReference clientRef = new AtomicReference<>(client);
+ List removeDataNodeLocations =
+ clientRef
+ .get()
+ .getDataNodeConfiguration(-1)
+ .getDataNodeConfigurationMap()
+ .values()
+ .stream()
+ .map(TDataNodeConfiguration::getLocation)
+ .filter(location -> removeDataNodes.contains(location.getDataNodeId()))
+ .collect(Collectors.toList());
+ if (SQLModel.NOT_USE_SQL.equals(model)) {
+ TDataNodeRemoveReq removeReq = new TDataNodeRemoveReq(removeDataNodeLocations);
+
+ // Remove data nodes
+ TDataNodeRemoveResp removeResp = clientRef.get().removeDataNode(removeReq);
+ LOGGER.info("Submit Remove DataNodes result {} ", removeResp);
+ if (removeResp.getStatus().getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) {
+ if (expectRemoveSuccess) {
+ LOGGER.error("Submit Remove DataNodes fail");
+ Assert.fail();
+ } else {
+ LOGGER.info("Submit Remove DataNodes fail, as expected.");
+ return;
+ }
+ }
+ LOGGER.info("Submit Remove DataNodes request: {}", removeReq);
+
+ } else {
+ String removeDataNodeSQL = generateRemoveString(removeDataNodes);
+ LOGGER.info("Remove DataNodes SQL: {}", removeDataNodeSQL);
+ try {
+ statement.execute(removeDataNodeSQL);
+ } catch (IoTDBSQLException e) {
+ if (expectRemoveSuccess) {
+ LOGGER.error("Remove DataNodes SQL execute fail: {}", e.getMessage());
+ Assert.fail();
+ } else {
+ LOGGER.info("Submit Remove DataNodes fail, as expected");
+ return;
+ }
+ }
+ LOGGER.info("Remove DataNodes SQL submit successfully.");
+ }
+
+ // Wait until success
+ boolean removeSuccess = false;
+ try {
+ awaitUntilSuccess(clientRef, removeDataNodeLocations);
+ removeSuccess = true;
+ } catch (ConditionTimeoutException e) {
+ if (expectRemoveSuccess) {
+ LOGGER.error("Remove DataNodes timeout in 2 minutes");
+ Assert.fail();
+ }
+ }
+
+ if (!expectRemoveSuccess && removeSuccess) {
+ LOGGER.error("Remove DataNodes success, but expect fail");
+ Assert.fail();
+ }
+
+ LOGGER.info("Remove DataNodes success");
+
+ if (rejoinRemovedDataNode) {
+ try {
+ // Use sleep and restart to ensure that removeDataNodes restarts successfully
+ Thread.sleep(30000);
+ restartDataNodes(removeDataNodeWrappers);
+ LOGGER.info("RemoveDataNodes:{} rejoined successfully.", removeDataNodes);
+ } catch (Exception e) {
+ LOGGER.error("RemoveDataNodes rejoin failed.");
+ Assert.fail();
+ }
+ }
+ } catch (InconsistentDataException e) {
+ LOGGER.error("Unexpected error:", e);
+ }
+
+ try (final Connection connection = makeItCloseQuietly(EnvFactory.getEnv().getConnection());
+ final Statement statement = makeItCloseQuietly(connection.createStatement())) {
+
+ // Check the data region distribution after removing data nodes
+ Map> afterRegionMap = getDataRegionMap(statement);
+ afterRegionMap.forEach(
+ (key, valueSet) -> {
+ LOGGER.info("Key: {}, Value: {}", key, valueSet);
+ if (valueSet.size() != dataReplicateFactor) {
+ Assert.fail();
+ }
+ });
+
+ if (rejoinRemovedDataNode) {
+ ResultSet result = statement.executeQuery(SHOW_DATANODES);
+ Set allDataNodeId = new HashSet<>();
+ while (result.next()) {
+ allDataNodeId.add(result.getInt(ColumnHeaderConstant.NODE_ID));
+ }
+ Assert.assertEquals(allDataNodeId.size(), dataNodeNum);
+ }
+ } catch (InconsistentDataException e) {
+ LOGGER.error("Unexpected error:", e);
+ }
+ }
+
+ private static Set selectRemoveDataNodes(
+ Set allDataNodeId, Map> regionMap, int removeDataNodeNum) {
+ Set removeDataNodeIds = new HashSet<>();
+ for (int i = 0; i < removeDataNodeNum; i++) {
+ int removeDataNodeId = allDataNodeId.iterator().next();
+ removeDataNodeIds.add(removeDataNodeId);
+ allDataNodeId.remove(removeDataNodeId);
+ }
+ return removeDataNodeIds;
+ }
+
+ private static void awaitUntilSuccess(
+ AtomicReference clientRef,
+ List removeDataNodeLocations) {
+ AtomicReference> lastTimeDataNodeLocations = new AtomicReference<>();
+ AtomicReference lastException = new AtomicReference<>();
+
+ try {
+ Awaitility.await()
+ .atMost(2, TimeUnit.MINUTES)
+ .pollDelay(2, TimeUnit.SECONDS)
+ .until(
+ () -> {
+ try {
+ List remainingDataNodes =
+ clientRef
+ .get()
+ .getDataNodeConfiguration(-1)
+ .getDataNodeConfigurationMap()
+ .values()
+ .stream()
+ .map(TDataNodeConfiguration::getLocation)
+ .collect(Collectors.toList());
+ lastTimeDataNodeLocations.set(remainingDataNodes);
+ for (TDataNodeLocation location : removeDataNodeLocations) {
+ if (remainingDataNodes.contains(location)) {
+ return false;
+ }
+ }
+ return true;
+ } catch (TException e) {
+ clientRef.set(
+ (SyncConfigNodeIServiceClient)
+ EnvFactory.getEnv().getLeaderConfigNodeConnection());
+ lastException.set(e);
+ return false;
+ } catch (Exception e) {
+ // Any exception can be ignored
+ lastException.set(e);
+ return false;
+ }
+ });
+ } catch (ConditionTimeoutException e) {
+ if (lastTimeDataNodeLocations.get() == null) {
+ LOGGER.error(
+ "Maybe getDataNodeConfiguration fail, lastTimeDataNodeLocations is null, last Exception:",
+ lastException.get());
+ throw e;
+ }
+ String actualSetStr = lastTimeDataNodeLocations.get().toString();
+ lastTimeDataNodeLocations.get().removeAll(removeDataNodeLocations);
+ String expectedSetStr = lastTimeDataNodeLocations.get().toString();
+ LOGGER.error(
+ "Remove DataNodes timeout in 2 minutes, expected set: {}, actual set: {}",
+ expectedSetStr,
+ actualSetStr);
+ if (lastException.get() == null) {
+ LOGGER.info("No exception during awaiting");
+ } else {
+ LOGGER.error("Last exception during awaiting:", lastException.get());
+ }
+ throw e;
+ }
+
+ LOGGER.info("DataNodes has been successfully changed to {}", lastTimeDataNodeLocations.get());
+ }
+
+ public void restartDataNodes(List dataNodeWrappers) {
+ dataNodeWrappers.parallelStream()
+ .forEach(
+ nodeWrapper -> {
+ nodeWrapper.stopForcibly();
+ Awaitility.await()
+ .atMost(1, TimeUnit.MINUTES)
+ .pollDelay(2, TimeUnit.SECONDS)
+ .until(() -> !nodeWrapper.isAlive());
+ LOGGER.info("Node {} stopped.", nodeWrapper.getId());
+ nodeWrapper.start();
+ Awaitility.await()
+ .atMost(1, TimeUnit.MINUTES)
+ .pollDelay(2, TimeUnit.SECONDS)
+ .until(nodeWrapper::isAlive);
+ try {
+ TimeUnit.SECONDS.sleep(10);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ LOGGER.info("Node {} restarted.", nodeWrapper.getId());
+ });
+ }
+
+ public static String generateRemoveString(Set dataNodes) {
+ StringBuilder sb = new StringBuilder("remove datanode ");
+
+ for (Integer node : dataNodes) {
+ sb.append(node).append(", ");
+ }
+
+ sb.setLength(sb.length() - 2);
+
+ return sb.toString();
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeNormalIT.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeNormalIT.java
new file mode 100644
index 0000000000000..517d8ee4cad67
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/IoTDBRemoveDataNodeNormalIT.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.removedatanode;
+
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.ClusterIT;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+@Category({ClusterIT.class})
+@RunWith(IoTDBTestRunner.class)
+public class IoTDBRemoveDataNodeNormalIT extends IoTDBRemoveDataNodeITFramework {
+
+ @Test
+ public void success1C4DTest() throws Exception {
+ successTest(2, 3, 1, 4, 1, 2, true, SQLModel.NOT_USE_SQL);
+ }
+
+ @Test
+ public void fail1C3DTest() throws Exception {
+ failTest(2, 3, 1, 3, 1, 2, false, SQLModel.NOT_USE_SQL);
+ }
+
+ @Test
+ public void success1C4DTestUseSQL() throws Exception {
+ successTest(2, 3, 1, 4, 1, 2, true, SQLModel.TREE_MODEL_SQL);
+ }
+
+ @Test
+ public void fail1C3DTestUseSQL() throws Exception {
+ failTest(2, 3, 1, 3, 1, 2, false, SQLModel.TREE_MODEL_SQL);
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/SQLModel.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/SQLModel.java
new file mode 100644
index 0000000000000..0963072d44cae
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/removedatanode/SQLModel.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.confignode.it.removedatanode;
+
+public enum SQLModel {
+ NOT_USE_SQL,
+
+ TREE_MODEL_SQL,
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java b/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java
index 76feab5efe32d..5f9da5ac36a83 100644
--- a/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java
+++ b/integration-test/src/test/java/org/apache/iotdb/confignode/it/utils/ConfigNodeTestUtils.java
@@ -323,8 +323,8 @@ public static TClusterParameters generateClusterParameters() {
clusterParameters.setTimePartitionInterval(604800000);
clusterParameters.setDataReplicationFactor(1);
clusterParameters.setSchemaReplicationFactor(1);
- clusterParameters.setDataRegionPerDataNode(5.0);
- clusterParameters.setSchemaRegionPerDataNode(1.0);
+ clusterParameters.setDataRegionPerDataNode(0);
+ clusterParameters.setSchemaRegionPerDataNode(1);
clusterParameters.setDiskSpaceWarningThreshold(0.01);
clusterParameters.setReadConsistencyLevel("strong");
clusterParameters.setTimestampPrecision("ms");
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java
index bb3a8de7d12f4..620043e6c7b7b 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDatetimeFormatIT.java
@@ -128,5 +128,14 @@ public void testBigDateTime() {
e.printStackTrace();
fail();
}
+ try (Connection connection = EnvFactory.getEnv().getConnection();
+ Statement statement = connection.createStatement()) {
+ statement.execute("insert into root.sg.d1(time,s2) values (16182830055860000000, 8.76);");
+ fail();
+ } catch (SQLException e) {
+ Assert.assertTrue(
+ e.getMessage()
+ .contains("please check whether the timestamp 16182830055860000000 is correct."));
+ }
}
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java
index 209657e516185..047bb10bfd3ca 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBDuplicateTimeIT.java
@@ -43,8 +43,6 @@ public class IoTDBDuplicateTimeIT {
@Before
public void setUp() throws Exception {
- EnvFactory.getEnv().getConfig().getCommonConfig().setAvgSeriesPointNumberThreshold(2);
- // Adjust memstable threshold size to make it flush automatically
EnvFactory.getEnv().initClusterEnvironment();
}
@@ -62,6 +60,7 @@ public void testDuplicateTime() throws SQLException {
// version-1 tsfile
statement.execute("insert into root.db.d1(time,s1) values (2,2)");
statement.execute("insert into root.db.d1(time,s1) values (3,3)");
+ statement.execute("flush");
// version-2 unseq work memtable
statement.execute("insert into root.db.d1(time,s1) values (2,20)");
@@ -69,9 +68,11 @@ public void testDuplicateTime() throws SQLException {
// version-3 tsfile
statement.execute("insert into root.db.d1(time,s1) values (5,5)");
statement.execute("insert into root.db.d1(time,s1) values (6,6)");
+ statement.execute("flush root.db true");
// version-2 unseq work memtable -> unseq tsfile
statement.execute("insert into root.db.d1(time,s1) values (5,50)");
+ statement.execute("flush");
try (ResultSet set = statement.executeQuery("SELECT s1 FROM root.db.d1 where time = 5")) {
int cnt = 0;
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java
index aa7b7024677c0..6bcbc762fc53b 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBEncodingIT.java
@@ -19,13 +19,17 @@
package org.apache.iotdb.db.it;
+import org.apache.iotdb.isession.ISession;
import org.apache.iotdb.it.env.EnvFactory;
import org.apache.iotdb.it.framework.IoTDBTestRunner;
import org.apache.iotdb.itbase.category.LocalStandaloneIT;
import org.apache.iotdb.rpc.TSStatusCode;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
@@ -36,22 +40,37 @@
import java.sql.Statement;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
@RunWith(IoTDBTestRunner.class)
@Category({LocalStandaloneIT.class})
public class IoTDBEncodingIT {
- @Before
- public void setUp() throws Exception {
+ private static final String[] databasesToClear =
+ new String[] {"root.db_0", "root.db1", "root.turbine1"};
+
+ @BeforeClass
+ public static void setUpClass() throws Exception {
EnvFactory.getEnv().initClusterEnvironment();
}
- @After
- public void tearDown() throws Exception {
+ @AfterClass
+ public static void tearDownClass() throws Exception {
EnvFactory.getEnv().cleanClusterEnvironment();
}
+ @After
+ public void tearDown() {
+ for (String database : databasesToClear) {
+ try (ISession session = EnvFactory.getEnv().getSessionConnection()) {
+ session.executeNonQueryStatement("DELETE DATABASE " + database);
+ } catch (Exception ignored) {
+
+ }
+ }
+ }
+
@Test
public void testSetEncodingRegularFailed() {
try (Connection connection = EnvFactory.getEnv().getConnection();
@@ -432,4 +451,75 @@ public void testFloatPrecision2() {
fail();
}
}
+
+ @Test
+ public void testCreateNewTypes() throws Exception {
+ String currDB = "root.db1";
+ int seriesCnt = 0;
+ TSDataType[] dataTypes =
+ new TSDataType[] {
+ TSDataType.STRING, TSDataType.BLOB, TSDataType.TIMESTAMP, TSDataType.DATE
+ };
+
+ // supported encodings
+ try (Connection connection = EnvFactory.getEnv().getConnection();
+ Statement statement = connection.createStatement()) {
+ for (TSDataType dataType : dataTypes) {
+ for (TSEncoding encoding : TSEncoding.values()) {
+ if (encoding.isSupported(dataType)) {
+ statement.execute(
+ "create timeseries "
+ + currDB
+ + ".d1.s"
+ + seriesCnt
+ + " with datatype="
+ + dataType
+ + ", encoding="
+ + encoding
+ + ", compression=SNAPPY");
+ seriesCnt++;
+ }
+ }
+ }
+
+ ResultSet resultSet = statement.executeQuery("SHOW TIMESERIES");
+
+ while (resultSet.next()) {
+ seriesCnt--;
+ }
+ assertEquals(0, seriesCnt);
+ statement.execute("DROP DATABASE " + currDB);
+ }
+
+ // unsupported encodings
+ try (Connection connection = EnvFactory.getEnv().getConnection();
+ Statement statement = connection.createStatement()) {
+ for (TSDataType dataType : dataTypes) {
+ for (TSEncoding encoding : TSEncoding.values()) {
+ if (!encoding.isSupported(dataType)) {
+ try {
+ statement.execute(
+ "create timeseries "
+ + currDB
+ + ".d1.s"
+ + seriesCnt
+ + " with datatype="
+ + dataType
+ + ", encoding="
+ + encoding
+ + ", compression=SNAPPY");
+ fail("Should have thrown an exception");
+ } catch (SQLException e) {
+ assertEquals(
+ "507: encoding " + encoding + " does not support " + dataType, e.getMessage());
+ }
+ seriesCnt++;
+ }
+ }
+ }
+
+ ResultSet resultSet = statement.executeQuery("SHOW TIMESERIES");
+ assertFalse(resultSet.next());
+ }
+ }
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFileTimeIndexIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFileTimeIndexIT.java
new file mode 100644
index 0000000000000..a0d0a083701ac
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFileTimeIndexIT.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.it;
+
+import org.apache.iotdb.it.env.EnvFactory;
+import org.apache.iotdb.it.framework.IoTDBTestRunner;
+import org.apache.iotdb.itbase.category.ClusterIT;
+import org.apache.iotdb.itbase.category.LocalStandaloneIT;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Locale;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+@RunWith(IoTDBTestRunner.class)
+@Category({LocalStandaloneIT.class, ClusterIT.class})
+public class IoTDBFileTimeIndexIT {
+
+ private static final String[] sqls =
+ new String[] {
+ "insert into root.db.d1(time,s1) values(2,2)",
+ "insert into root.db.d1(time,s1) values(3,3)",
+ "flush",
+ "insert into root.db.d2(time,s1) values(5,5)",
+ "flush",
+ "insert into root.db.d1(time,s1) values(4,4)",
+ "flush",
+ "insert into root.db.d2(time,s1) values(1,1)",
+ "insert into root.db.d1(time,s1) values(3,30)",
+ "insert into root.db.d1(time,s1) values(4,40)",
+ "flush",
+ "insert into root.db.d2(time,s1) values(2,2)",
+ "insert into root.db.d1(time,s1) values(4,400)",
+ "flush",
+ };
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ Locale.setDefault(Locale.ENGLISH);
+
+ EnvFactory.getEnv()
+ .getConfig()
+ .getCommonConfig()
+ .setDataRegionGroupExtensionPolicy("CUSTOM")
+ .setDefaultDataRegionGroupNumPerDatabase(1)
+ .setEnableSeqSpaceCompaction(false)
+ .setEnableUnseqSpaceCompaction(false)
+ .setEnableCrossSpaceCompaction(false)
+ .setQueryMemoryProportion("1:100:200:50:200:200:0:250");
+ // Adjust memstable threshold size to make it flush automatically
+ EnvFactory.getEnv().initClusterEnvironment();
+ prepareData();
+ }
+
+ private static void prepareData() {
+ try (Connection connection = EnvFactory.getEnv().getConnection();
+ Statement statement = connection.createStatement()) {
+
+ for (String sql : sqls) {
+ statement.addBatch(sql);
+ }
+ statement.executeBatch();
+ } catch (Exception e) {
+ fail(e.getMessage());
+ }
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ EnvFactory.getEnv().cleanClusterEnvironment();
+ }
+
+ @Test
+ public void testQuery() throws SQLException {
+ long[] time = {2L, 3L, 4L};
+ double[] value = {2.0f, 30.0f, 400.0f};
+
+ try (Connection connection = EnvFactory.getEnv().getConnection();
+ Statement statement = connection.createStatement();
+ ResultSet resultSet = statement.executeQuery("select s1 from root.db.d1")) {
+ int cnt = 0;
+ while (resultSet.next()) {
+ assertEquals(time[cnt], resultSet.getLong(1));
+ assertEquals(value[cnt], resultSet.getDouble(2), 0.00001);
+ cnt++;
+ }
+ assertEquals(time.length, cnt);
+ }
+ }
+}
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java
index 705f4b8d9ede7..2ddb7128d1a5c 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFilterNullIT.java
@@ -46,7 +46,8 @@ public class IoTDBFilterNullIT {
"CREATE DATABASE root.testNullFilter",
"CREATE TIMESERIES root.testNullFilter.d1.s1 WITH DATATYPE=INT32, ENCODING=PLAIN",
"CREATE TIMESERIES root.testNullFilter.d1.s2 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN",
- "CREATE TIMESERIES root.testNullFilter.d1.s3 WITH DATATYPE=DOUBLE, ENCODING=PLAIN"
+ "CREATE TIMESERIES root.testNullFilter.d1.s3 WITH DATATYPE=DOUBLE, ENCODING=PLAIN",
+ "CREATE ALIGNED TIMESERIES root.testNullFilter.d2(s1 INT32, s2 BOOLEAN, s3 DOUBLE);"
};
private static final String[] insertSqls =
@@ -54,6 +55,9 @@ public class IoTDBFilterNullIT {
"INSERT INTO root.testNullFilter.d1(timestamp,s2,s3) " + "values(1, false, 11.1)",
"INSERT INTO root.testNullFilter.d1(timestamp,s1,s2) " + "values(2, 22, true)",
"INSERT INTO root.testNullFilter.d1(timestamp,s1,s3) " + "values(3, 23, 33.3)",
+ "INSERT INTO root.testNullFilter.d2(timestamp,s2,s3) " + "values(1, false, 11.1)",
+ "INSERT INTO root.testNullFilter.d2(timestamp,s1,s2) " + "values(2, 22, true)",
+ "INSERT INTO root.testNullFilter.d2(timestamp,s1,s2) " + "values(3, 22, false)",
};
private static void prepareData() {
@@ -128,7 +132,34 @@ public void nullFilterTest() {
assertEquals(retArray.length, count);
}
} catch (Exception e) {
- e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
+
+ @Test
+ public void inPushDownTest() {
+ String[] retArray = new String[] {"2,22,true,null", "3,22,false,null"};
+ try (Connection connectionIsNull = EnvFactory.getEnv().getConnection();
+ Statement statementIsNull = connectionIsNull.createStatement()) {
+ int count = 0;
+ try (ResultSet resultSet =
+ statementIsNull.executeQuery(
+ "select * from root.testNullFilter.d2 where s1 in (22, 23)")) {
+ while (resultSet.next()) {
+ String ans =
+ resultSet.getString(ColumnHeaderConstant.TIME)
+ + ","
+ + resultSet.getString("root.testNullFilter.d2.s1")
+ + ","
+ + resultSet.getString("root.testNullFilter.d2.s2")
+ + ","
+ + resultSet.getString("root.testNullFilter.d2.s3");
+ assertEquals(retArray[count], ans);
+ count++;
+ }
+ assertEquals(retArray.length, count);
+ }
+ } catch (Exception e) {
fail(e.getMessage());
}
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java
index c72c7e41b1253..3a73be3adb309 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBFloatPrecisionIT.java
@@ -169,4 +169,49 @@ public void selectAllSQLTest() {
fail(e.getMessage());
}
}
+
+ @Test
+ public void bigFloatNumberTest2() {
+ try (Connection connection = EnvFactory.getEnv().getConnection();
+ Statement statement = connection.createStatement()) {
+ float[] floats = new float[] {6.5536403E8F, 3.123456768E20F, Float.NaN};
+ double[] doubles = new double[] {9.223372036854E18, 9.223372036854E100, Double.NaN};
+
+ statement.execute("create timeseries root.sg.d1.s1 with datatype=float, encoding=rle");
+ statement.execute("create timeseries root.sg.d1.s2 with datatype=double, encoding=rle");
+ statement.execute(
+ "insert into root.sg.d1(time, s1, s2) values (1, 6.5536403E8, 9.223372036854E18)");
+ statement.execute(
+ "insert into root.sg.d1(time, s1, s2) values (2, 3.123456768E20, 9.223372036854E100)");
+ statement.execute("insert into root.sg.d1(time, s1, s2) values (3, NaN, NaN)");
+
+ int cnt;
+ try (ResultSet resultSet = statement.executeQuery("select s1, s2 from root.sg.d1")) {
+ assertNotNull(resultSet);
+ cnt = 0;
+ while (resultSet.next()) {
+ assertEquals(floats[cnt], resultSet.getFloat("root.sg.d1.s1"), DELTA_FLOAT);
+ assertEquals(doubles[cnt], resultSet.getDouble("root.sg.d1.s2"), DELTA_DOUBLE);
+ cnt++;
+ }
+ assertEquals(3, cnt);
+ }
+
+ statement.execute("flush");
+
+ try (ResultSet resultSet = statement.executeQuery("select s1, s2 from root.sg.d1")) {
+ assertNotNull(resultSet);
+ cnt = 0;
+ while (resultSet.next()) {
+ assertEquals(floats[cnt], resultSet.getFloat("root.sg.d1.s1"), DELTA_FLOAT);
+ assertEquals(doubles[cnt], resultSet.getDouble("root.sg.d1.s2"), DELTA_DOUBLE);
+ cnt++;
+ }
+ assertEquals(3, cnt);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.getMessage());
+ }
+ }
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java
index 7ebbcfc8472b3..6e52368be4ce4 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertMultiRowIT.java
@@ -56,7 +56,7 @@ public class IoTDBInsertMultiRowIT {
@BeforeClass
public static void setUp() throws Exception {
- EnvFactory.getEnv().getConfig().getCommonConfig().setMaxInnerCompactionCandidateFileNum(2);
+ EnvFactory.getEnv().getConfig().getCommonConfig().setInnerCompactionCandidateFileNum(2);
EnvFactory.getEnv().initClusterEnvironment();
initCreateSQLStatement();
insertData();
@@ -171,6 +171,13 @@ public void testInsertMultiRowWithWrongTimestampPrecision() {
} catch (SQLException e) {
assertTrue(e.getMessage().contains("Current system timestamp precision is ms"));
}
+ try (Statement st1 = connection.createStatement()) {
+ st1.execute(
+ "insert into root.t1.d99.wt01(timestamp, s1, s2) values(-1618283005586000, 1, 1), (-1618283005586001, 1, 2)");
+ fail();
+ } catch (SQLException e) {
+ assertTrue(e.getMessage().contains("Current system timestamp precision is ms"));
+ }
}
@Test
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java
index d237423c52f41..3ebefbba2026d 100644
--- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBInsertWithQueryIT.java
@@ -52,6 +52,7 @@ public class IoTDBInsertWithQueryIT {
@Before
public void setUp() throws Exception {
+ EnvFactory.getEnv().getConfig().getCommonConfig().setTimestampPrecisionCheckEnabled(false);
EnvFactory.getEnv().initClusterEnvironment();
}
diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadLastCacheIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadLastCacheIT.java
new file mode 100644
index 0000000000000..a005f80d19df2
--- /dev/null
+++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBLoadLastCacheIT.java
@@ -0,0 +1,556 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.db.it;
+
+import org.apache.iotdb.commons.path.PartialPath;
+import org.apache.iotdb.db.queryengine.common.header.ColumnHeaderConstant;
+import org.apache.iotdb.db.queryengine.plan.relational.metadata.fetcher.cache.LastCacheLoadStrategy;
+import org.apache.iotdb.it.env.EnvFactory;
+import org.apache.iotdb.it.utils.TsFileGenerator;
+import org.apache.iotdb.itbase.category.ClusterIT;
+import org.apache.iotdb.itbase.category.LocalStandaloneIT;
+import org.apache.iotdb.jdbc.IoTDBSQLException;
+
+import com.google.common.util.concurrent.RateLimiter;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.PlainDeviceID;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.read.common.Path;
+import org.apache.tsfile.write.TsFileWriter;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.schema.IMeasurementSchema;
+import org.apache.tsfile.write.schema.MeasurementSchema;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.nio.file.Files;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.Objects;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+@SuppressWarnings({"ResultOfMethodCallIgnored", "UnstableApiUsage"})
+@RunWith(Parameterized.class)
+@Category({LocalStandaloneIT.class, ClusterIT.class})
+public class IoTDBLoadLastCacheIT {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(IoTDBLoadLastCacheIT.class);
+ private static final long PARTITION_INTERVAL = 10 * 1000L;
+ private static final int connectionTimeoutInMS = (int) TimeUnit.SECONDS.toMillis(300);
+ private static final long loadTsFileAnalyzeSchemaMemorySizeInBytes = 10 * 1024L;
+
+ private File tmpDir;
+ private final LastCacheLoadStrategy lastCacheLoadStrategy;
+
+ @Parameters(name = "loadLastCacheStrategy={0}")
+ public static Collection